/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef ARM64Assembler_h
#define ARM64Assembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM64)

#include "AssemblerBuffer.h"
#include "AssemblerCommon.h"

#include <wtf/Assertions.h>
#include <wtf/Vector.h>
#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128)
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)
ALWAYS_INLINE bool isInt7(int32_t value)
{
    return value == ((value << 25) >> 25);
}

ALWAYS_INLINE bool isInt11(int32_t value)
{
    return value == ((value << 21) >> 21);
}

ALWAYS_INLINE bool isUInt5(int32_t value)
{
    return !(value & ~0x1f);
}
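// Illustrative note (not part of the original header): the shift pair above
// checks that a value survives truncation to N bits followed by sign
// extension. For isInt7, (value << 25) >> 25 keeps the low 7 bits of a
// 32-bit int and sign-extends bit 6, so:
//     isInt7(63)  -> true   (largest 7-bit value)
//     isInt7(64)  -> false  (0x40 sign-extends to -64)
//     isInt7(-64) -> true   (most negative 7-bit value)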
class UInt5 {
public:
    explicit UInt5(int value)
        : m_value(value)
    {
        ASSERT(isUInt5(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class UInt12 {
public:
    explicit UInt12(int value)
        : m_value(value)
    {
        ASSERT(isUInt12(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PostIndex {
public:
    explicit PostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PreIndex {
public:
    explicit PreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt9(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPostIndex {
public:
    explicit PairPostIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};

class PairPreIndex {
public:
    explicit PairPreIndex(int value)
        : m_value(value)
    {
        ASSERT(isInt11(value));
    }

    operator int() { return m_value; }

private:
    int m_value;
};
typedef ARM64LogicalImmediate LogicalImmediate;

inline uint16_t getHalfword(uint64_t value, int which)
{
    return value >> (which << 4);
}
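// Sketch of how getHalfword is typically used when materializing a 64-bit
// constant with a movz/movk sequence (hypothetical helper, not in this file):
//
//     void moveWideInt64(ARM64Assembler& a, ARM64Registers::RegisterID rd, uint64_t value)
//     {
//         a.movz<64>(rd, getHalfword(value, 0), 0);
//         a.movk<64>(rd, getHalfword(value, 1), 16);
//         a.movk<64>(rd, getHalfword(value, 2), 32);
//         a.movk<64>(rd, getHalfword(value, 3), 48);
//     }
//
// (which << 4) is (which * 16), so halfword 0 is bits 15..0, halfword 1 is
// bits 31..16, and so on.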
namespace ARM64Registers {

#define FOR_EACH_CPU_REGISTER(V) \
    FOR_EACH_CPU_GPREGISTER(V) \
    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
    FOR_EACH_CPU_FPREGISTER(V)

// The following are defined as pairs of the following value:
// 1. type of the storage needed to save the register value by the JIT probe.
// 2. name of the register.
#define FOR_EACH_CPU_GPREGISTER(V) \
    /* Parameter/result registers */ \
    /* Indirect result location register */ \
    /* Temporary registers */ \
    /* Intra-procedure-call scratch registers (temporary) */ \
    /* Platform Register (temporary) */ \

#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
// ARM64 always has 32 FPU registers, each 128 bits wide. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf
// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf.
// However, we only use them for 64-bit doubles.
#define FOR_EACH_CPU_FPREGISTER(V) \
    /* Parameter/result registers */ \
    /* Callee-saved (up to 64-bits only!) */ \
    /* Temporary registers */ \
typedef enum {
    #define DECLARE_REGISTER(_type, _regName) _regName,
    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
} RegisterID;

typedef enum {
    #define DECLARE_REGISTER(_type, _regName) _regName,
    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
} FPRegisterID;

static constexpr bool isSp(RegisterID reg) { return reg == sp; }
static constexpr bool isZr(RegisterID reg) { return reg == zr; }

} // namespace ARM64Registers
class ARM64Assembler {
public:
    typedef ARM64Registers::RegisterID RegisterID;
    typedef ARM64Registers::FPRegisterID FPRegisterID;

    static constexpr RegisterID firstRegister() { return ARM64Registers::x0; }
    static constexpr RegisterID lastRegister() { return ARM64Registers::sp; }

    static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
    static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; }

    static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
    static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
    ARM64Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }

    AssemblerBuffer& buffer() { return m_buffer; }
    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS, ConditionCS = ConditionHS,
        ConditionLO, ConditionCC = ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,
        ConditionInvalid
    } Condition;

    static Condition invert(Condition cond)
    {
        return static_cast<Condition>(cond ^ 1);
    }
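    // The xor-with-1 trick works because the AArch64 condition codes come in
    // complementary even/odd pairs, e.g. (using the standard NZCV encodings):
    //     invert(ConditionEQ) == ConditionNE   // 0x0 ^ 1 == 0x1
    //     invert(ConditionGE) == ConditionLT   // 0xa ^ 1 == 0xb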
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
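    // Example of the packing these macros implement (illustrative): the low
    // 4 bits hold the enum index and the upper bits the maximum patch size
    // in bytes, so
    //     JumpCondition == JUMP_ENUM_WITH_SIZE(2, 8) == (8 << 4) | 2 == 0x82
    //     JUMP_ENUM_SIZE(JumpCondition) == 0x82 >> 4 == 8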
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
                    JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
                    JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
                    JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
                    JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
    };
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
    };
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_is64Bit = is64Bit;
            data.realTypes.m_compareRegister = compareRegister;
        }
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
            data.realTypes.m_bitNumber = bitNumber;
            data.realTypes.m_compareRegister = compareRegister;
        }
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
        bool is64Bit() const { return data.realTypes.m_is64Bit; }
        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

    private:
        union {
            struct RealTypes {
                intptr_t m_from : 48;
                intptr_t m_to : 48;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 4;
                unsigned m_bitNumber : 6;
                RegisterID m_compareRegister : 6;
                bool m_is64Bit : 1;
            } realTypes;
            struct CopyTypes {
                uint64_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };
    // bits(N) VFPExpandImm(bits(8) imm8);
    //
    // Encoding of floating point immediates is a little complicated. Here's a
    // high level description:
    //     +/-m * 2^-n, where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
    // and the algorithm for expanding to a single precision float:
    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
    //
    // The trickiest bit is how the exponent is handled. The following table
    // may help clarify things a little:
    //
    //     100    01111100    124    -3    1020    01111111100
    //     101    01111101    125    -2    1021    01111111101
    //     110    01111110    126    -1    1022    01111111110
    //     111    01111111    127     0    1023    01111111111
    //     000    10000000    128     1    1024    10000000000
    //     001    10000001    129     2    1025    10000000001
    //     010    10000010    130     3    1026    10000000010
    //     011    10000011    131     4    1027    10000000011
    // The first column shows the bit pattern stored in bits 6-4 of the arm
    // encoded immediate. The second column shows the 8-bit IEEE 754 single
    // -precision exponent in binary, the third column shows the raw decimal
    // value. IEEE 754 single-precision numbers are stored with a bias of 127
    // to the exponent, so the fourth column shows the resulting exponent.
    // From this we can see that the exponent can be in the range -3..4,
    // which agrees with the high level description given above. The fifth
    // and sixth columns show the value stored in an IEEE 754 double-precision
    // number to represent these exponents in decimal and binary, given the
    // bias of 1023.
    //
    // Ultimately, detecting doubles that can be encoded as immediates on arm
    // and encoding doubles is actually not too bad. A floating point value can
    // be encoded by retaining the sign bit, the low three bits of the exponent
    // and the high 4 bits of the mantissa. To validly be able to encode an
    // immediate the remainder of the mantissa must be zero, and the high part
    // of the exponent must match the top bit retained, bar the highest bit
    // which must be its inverse.
    static bool canEncodeFPImm(double d)
    {
        // Discard the sign bit, the low two bits of the exponent & the highest
        // four bits of the mantissa.
        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
    }
    template<int datasize>
    static bool canEncodePImmOffset(int32_t offset)
    {
        return isValidScaledUImm12<datasize>(offset);
    }

    static bool canEncodeSImmOffset(int32_t offset)
    {
        return isValidSignedImm9(offset);
    }

private:
    int encodeFPImm(double d)
    {
        ASSERT(canEncodeFPImm(d));
        uint64_t u64 = bitwise_cast<uint64_t>(d);
        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
    }
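    // Worked example (illustrative, not from the original source): 1.0 is
    // 0x3ff0000000000000, the mask in canEncodeFPImm leaves
    // 0x3fc0000000000000, so the value is encodable, and encodeFPImm(1.0)
    // packs the sign, low exponent bits and high mantissa bits into
    // imm8 == 0x70. A sketch of the inverse expansion for doubles, following
    // the VFPExpandImm description above:
    //
    //     double expandFPImm(int imm8)
    //     {
    //         uint64_t sign = (imm8 >> 7) & 1;
    //         uint64_t b6 = (imm8 >> 6) & 1; // top retained exponent bit
    //         uint64_t expLow = (imm8 >> 4) & 3; // imm8<5:4>
    //         uint64_t mantHigh = imm8 & 0xf; // imm8<3:0>
    //         uint64_t exponent = ((b6 ^ 1) << 10) | ((b6 ? 0xffull : 0ull) << 2) | expLow;
    //         return bitwise_cast<double>((sign << 63) | (exponent << 52) | (mantHigh << 48));
    //     }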
    template<int datasize>
    int encodeShiftAmount(int amount)
    {
        ASSERT(!amount || datasize == (8 << amount));
        return amount ? 1 : 0;
    }

    template<int datasize>
    static int encodePositiveImmediate(unsigned pimm)
    {
        ASSERT(!(pimm & ((datasize / 8) - 1)));
        return pimm / (datasize / 8);
    }
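    // Example (illustrative): unsigned-offset loads and stores scale the
    // immediate by the access size, so for a 64-bit access the byte offset
    // must be a multiple of 8 and encodePositiveImmediate<64>(24) == 3,
    // which is the value the ldr/str emitters below place in the imm12 field.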
    enum ExcepnOp {
        ExcepnOp_EXCEPTION = 0,
        ExcepnOp_BREAKPOINT = 1,
        ExcepnOp_HALT = 2,
    };

    enum FPCmpOp {
        FPCmpOp_FCMP = 0x00,
        FPCmpOp_FCMP0 = 0x08,
        FPCmpOp_FCMPE = 0x10,
        FPCmpOp_FCMPE0 = 0x18
    };

    enum FPCondCmpOp {
        FPCondCmpOp_FCMP,
        FPCondCmpOp_FCMPE
    };

    enum FPDataOp1Source {
        FPDataOp_FMOV = 0,
        FPDataOp_FABS = 1,
        FPDataOp_FNEG = 2,
        FPDataOp_FSQRT = 3,
        FPDataOp_FCVT_toSingle = 4,
        FPDataOp_FCVT_toDouble = 5,
        FPDataOp_FCVT_toHalf = 7,
        FPDataOp_FRINTN = 8,
        FPDataOp_FRINTP = 9,
        FPDataOp_FRINTM = 10,
        FPDataOp_FRINTZ = 11,
        FPDataOp_FRINTA = 12,
        FPDataOp_FRINTX = 14,
        FPDataOp_FRINTI = 15
    };

    enum FPDataOp2Source {
        FPDataOp_FMUL,
        FPDataOp_FDIV,
        FPDataOp_FADD,
        FPDataOp_FSUB,
        FPDataOp_FMAX,
        FPDataOp_FMIN,
        FPDataOp_FMAXNM,
        FPDataOp_FMINNM,
        FPDataOp_FNMUL
    };

    enum {
        SIMD_LogicalOp_AND = 0x03
    };
    enum FPIntConvOp {
        FPIntConvOp_FCVTNS = 0x00,
        FPIntConvOp_FCVTNU = 0x01,
        FPIntConvOp_SCVTF = 0x02,
        FPIntConvOp_UCVTF = 0x03,
        FPIntConvOp_FCVTAS = 0x04,
        FPIntConvOp_FCVTAU = 0x05,
        FPIntConvOp_FMOV_QtoX = 0x06,
        FPIntConvOp_FMOV_XtoQ = 0x07,
        FPIntConvOp_FCVTPS = 0x08,
        FPIntConvOp_FCVTPU = 0x09,
        FPIntConvOp_FMOV_QtoX_top = 0x0e,
        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
        FPIntConvOp_FCVTMS = 0x10,
        FPIntConvOp_FCVTMU = 0x11,
        FPIntConvOp_FCVTZS = 0x18,
        FPIntConvOp_FCVTZU = 0x19,
    };

    enum MemOp {
        MemOp_STORE,
        MemOp_LOAD,
        MemOp_PREFETCH = 2, // size must be 3
        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
    };

    enum MemPairOpSize {
        MemPairOp_32 = 0,
        MemPairOp_LoadSigned_32 = 1,
        MemPairOp_64 = 2,

        MemPairOp_V32 = MemPairOp_32,
        MemPairOp_V64 = 1,
        MemPairOp_V128 = 2
    };

    enum LdrLiteralOp {
        LdrLiteralOp_32BIT = 0,
        LdrLiteralOp_64BIT = 1,
        LdrLiteralOp_LDRSW = 2,
        LdrLiteralOp_128BIT = 2
    };
    static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
    {
        // return the log2 of the size in bytes, e.g. a 64-bit size returns 3
        if (V)
            return size + 2;
        return (size >> 1) + 2;
    }
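    // Worked example (illustrative, using the MemPairOpSize values above):
    // for an integer 64-bit pair, size == MemPairOp_64 == 2, so the shift is
    // (2 >> 1) + 2 == 3 and the 7-bit pair immediate counts units of 8 bytes;
    // for a vector pair, MemPairOp_V128 == 2 takes the V branch and gives
    // 2 + 2 == 4, i.e. units of 16 bytes.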
public:
    // Integer Instructions:

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        add<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        if (isSp(rd) || isSp(rn)) {
            ASSERT(shift == LSL);
            ASSERT(!isSp(rm));
            add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
        } else
            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void adr(RegisterID rd, int offset)
    {
        insn(pcRelative(false, offset, rd));
    }

    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
    {
        ASSERT(!(offset & 0xfff));
        insn(pcRelative(true, offset >> 12, rd));
        nopCortexA53Fix843419();
    }
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        sbfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        asrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
    }
    ALWAYS_INLINE void b(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 6) >> 6);
        insn(unconditionalBranchImmediate(false, offset));
    }

    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        ASSERT(offset == (offset << 13) >> 13);
        insn(conditionalBranchImmediate(offset, cond));
    }
    template<int datasize>
    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        bfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }
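    // How the aliases above map onto BFM (illustrative, 64-bit): inserting an
    // 8-bit field at bit 16 is
    //     bfi<64>(rd, rn, 16, 8)  ==  bfm<64>(rd, rn, (64 - 16) & 63, 8 - 1)
    // where immr == 48 rotates the source so its low 8 bits land at bits
    // 23..16, and imms == 7 marks the width; bfxil instead uses immr == lsb
    // and imms == lsb + width - 1 to extract a field from the low end of rn.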
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
    }

    ALWAYS_INLINE void bl(int32_t offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(unconditionalBranchImmediate(true, offset));
    }
    ALWAYS_INLINE void blr(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_CALL, rn));
    }

    ALWAYS_INLINE void br(RegisterID rn)
    {
        insn(unconditionalBranchRegister(BranchType_JMP, rn));
    }

    ALWAYS_INLINE void brk(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
    }
    template<int datasize>
    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinc<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
    {
        csinv<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
    }
    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }
    template<int datasize>
    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
    {
        csneg<datasize>(rd, rn, rn, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
    {
        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
    {
        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
    }

    template<int datasize>
    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
    }
    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eon<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        eor<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
    {
        CHECK_DATASIZE();
        insn(extract(DATASIZE, rm, lsb, rn, rd));
    }

    ALWAYS_INLINE void hint(int imm)
    {
        insn(hintPseudo(imm));
    }

    ALWAYS_INLINE void hlt(uint16_t imm)
    {
        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
    }
    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void ldnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
    }
    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldr<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
    }
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of ldrb, since if the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        // Not calling the 5 argument form of ldrsb, since if the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }
    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        ldrsw(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 2);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }

    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
    {
        ASSERT(!(offset & 3));
        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
    }

    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
    }

    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
    }
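    // Example of the UBFM mapping (illustrative, 64-bit): lsl<64>(rd, rn, 4)
    // becomes ubfm<64>(rd, rn, 60, 59); immr == 60 rotates right by 60
    // (equivalently, left by 4) and imms == 59 keeps only the low 60 source
    // bits, so the result is rn shifted left by 4 with zeros shifted in.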
    template<int datasize>
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lslv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
    {
        ASSERT(shift < datasize);
        ubfm<datasize>(rd, rn, shift, datasize - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        lsrv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        nopCortexA53Fix835769<datasize>();
        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
    }
    template<int datasize>
    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        if (isSp(rd) || isSp(rm))
            add<datasize>(rd, rm, UInt12(0));
        else
            orr<datasize>(rd, ARM64Registers::zr, rm);
    }
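    // The split above matters because register 31 is context-dependent: in
    // the logical (ORR) encoding it reads as the zero register, while in
    // add/sub-immediate it means SP. So "mov sp, x0" has to be encoded as
    // "add sp, x0, #0", and an ordinary register move as "orr rd, xzr, rm".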
    template<int datasize>
    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
    {
        orr<datasize>(rd, ARM64Registers::zr, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!(shift & 0xf));
        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        CHECK_DATASIZE();
        nopCortexA53Fix835769<datasize>();
        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
    {
        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
    }
    ALWAYS_INLINE void nop()
    {
        insn(nopPseudo());
    }

    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
    {
        RELEASE_ASSERT(!(size % sizeof(int32_t)));
        size_t n = size / sizeof(int32_t);
        for (int32_t* ptr = static_cast<int32_t*>(base); n--;) {
            int insn = nopPseudo();
            if (isCopyingToExecutableMemory)
                performJITMemcpy(ptr++, &insn, sizeof(int));
            else
                memcpy(ptr++, &insn, sizeof(int));
        }
    }
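    // Usage sketch (illustrative): pad a 16-byte gap with four NOPs before
    // the buffer has been copied into executable memory:
    //
    //     ARM64Assembler::fillNops(gapStart, 16, false);
    //
    // When isCopyingToExecutableMemory is true, the writes go through
    // performJITMemcpy instead of memcpy so the JIT's write-protection
    // policy for executable pages is respected.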
    ALWAYS_INLINE void dmbSY()
    {
        insn(0xd5033fbf);
    }
    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orn<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        orr<datasize>(rd, rn, rm, LSL, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
    {
        CHECK_DATASIZE();
        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
    }
    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
    {
        insn(unconditionalBranchRegister(BranchType_RET, rn));
    }

    template<int datasize>
    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
        else
            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
    {
        CHECK_DATASIZE();
        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
    {
        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        rorv<datasize>(rd, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
    {
        extr<datasize>(rd, rs, rs, shift);
    }

    template<int datasize>
    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
    }
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
    }

    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        smaddl(rd, rn, rm, ARM64Registers::zr);
    }
    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void stnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        str<datasize>(rt, rn, rm, UXTX, 0);
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    template<int datasize>
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        // Not calling the 5 argument form of strb, since if the amount is omitted S is false.
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT_UNUSED(amount, !amount);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
    {
        strh(rt, rn, rm, UXTX, 0);
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        ASSERT(!amount || amount == 1);
        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
    {
        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
    {
        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
    {
        CHECK_DATASIZE();
        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
    }

    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
    {
        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
    }
    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
    {
        CHECK_DATASIZE();
        ASSERT(!shift || shift == 12);
        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd; it uses XZR for register 31. SUBS with extended register supports SP for Xd, but only when flags are not set; otherwise register 31 means XZR.");
        ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");

        if (isSp(rd) || isSp(rn))
            sub<datasize, setFlags>(rd, rn, rm, UXTX, 0);
        else
            sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
    {
        CHECK_DATASIZE();
        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
    }

    template<int datasize, SetFlags setFlags = DontSetFlags>
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        CHECK_DATASIZE();
        ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
        insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
    }
    template<int datasize>
    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
    {
        sbfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
    {
        sbfm<64>(rd, rn, 0, 31);
    }

    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(false, imm, offset, rt));
    }

    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
    {
        ASSERT(!(offset & 3));
        offset >>= 2;
        insn(testAndBranchImmediate(true, imm, offset, rt));
    }
    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
    }

    template<int datasize>
    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
    {
        and_<datasize, S>(ARM64Registers::zr, rn, imm);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
    {
        CHECK_DATASIZE();
        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
    }

    template<int datasize>
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
    {
        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
    }

    template<int datasize>
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        CHECK_DATASIZE();
        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
    }

    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umsubl(rd, rn, rm, ARM64Registers::zr);
    }

    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
    {
        nopCortexA53Fix835769<64>();
        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
    }

    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
    }

    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        umaddl(rd, rn, rm, ARM64Registers::zr);
    }

    template<int datasize>
    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 7);
    }

    template<int datasize>
    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
    {
        ubfm<datasize>(rd, rn, 0, 15);
    }

    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
    {
        ubfm<64>(rd, rn, 0, 31);
    }
    // Floating Point Instructions:

    template<int datasize>
    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
    }

    template<int datasize>
    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
    {
        CHECK_DATASIZE();
        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
    }

    template<int datasize>
    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
    {
        CHECK_DATASIZE();
        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
    {
        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
        ASSERT(dstsize != srcsize);
        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
    }
    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
    }

    template<int dstsize, int srcsize>
    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
    {
        CHECK_DATASIZE_OF(dstsize);
        CHECK_DATASIZE_OF(srcsize);
        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
    }
template<int datasize>
ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
}

template<int datasize>
ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
{
    CHECK_DATASIZE();
    insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
}

ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
}

ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
{
    insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
}
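// The fmov_top variants transfer 64 bits between a general-purpose register
// and the upper half (bits 64-127) of a 128-bit vector register, i.e. the
// FMOV Vd.D[1], Xn and FMOV Xd, Vn.D[1] forms; the low half is covered by
// the plain fmov<64> overloads above.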
template<int datasize>
ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_VECTOR_DATASIZE();
    insn(vectorDataProcessing2Source(SIMD_LogicalOp_AND, vm, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
}

template<int datasize>
ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
{
    CHECK_DATASIZE();
    insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
}
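// The scalar FP operations above fall into three encoding groups: one-source
// (fmov, fneg, the frint* rounding forms, fsqrt), two-source (fdiv, fmax,
// fmaxnm, fmin, fminnm, fmul, fnmul, fsub), and three-source fused
// multiply-add (fmadd, fmsub, fnmadd, fnmsub), emitted through
// floatingPointDataProcessing{1,2,3}Source respectively; vand is the odd one
// out, using the vector (SIMD) logical encoding.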
template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    ldr<datasize>(rt, rn, rm, UXTX, 0);
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
{
    CHECK_FP_MEMOP_DATASIZE();
    ASSERT(datasize >= 32);
    ASSERT(!(offset & 3));
    insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
}

template<int datasize>
ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
}
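// The FP load overloads mirror the AArch64 addressing modes: register offset
// (optionally extended and scaled), scaled unsigned 12-bit immediate (pimm),
// post-index and pre-index with a signed 9-bit immediate, PC-relative
// literal, and ldur's unscaled signed 9-bit immediate. For example (register
// and instance names illustrative only):
//
//     a.ldr<64>(ARM64Registers::q0, ARM64Registers::x0, 24u); // d0 = *(double*)(x0 + 24)
//     a.ldur<64>(ARM64Registers::q0, ARM64Registers::x0, -8); // d0 = *(double*)(x0 - 8)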
template<int dstsize, int srcsize>
ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
}
template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
{
    str<datasize>(rt, rn, rm, UXTX, 0);
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}

template<int datasize>
ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
{
    CHECK_FP_MEMOP_DATASIZE();
    insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
}
template<int dstsize, int srcsize>
ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
{
    CHECK_DATASIZE_OF(dstsize);
    CHECK_DATASIZE_OF(srcsize);
    insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
}
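// Note that scvtf and ucvtf pass DATASIZE_OF(srcsize) first and
// DATASIZE_OF(dstsize) second: in the FP<->integer conversion encoding the
// 'sf' field describes the general-purpose (here: source) register width and
// the 'type' field the FP (here: destination) register width, the reverse of
// the fcvt* direction above.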
AssemblerLabel labelIgnoringWatchpoints()
{
    return m_buffer.label();
}

AssemblerLabel labelForWatchpoint()
{
    AssemblerLabel result = m_buffer.label();
    if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
        result = label();
    m_indexOfLastWatchpoint = result.m_offset;
    m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
    return result;
}

AssemblerLabel label()
{
    AssemblerLabel result = m_buffer.label();
    while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
        nop();
        result = m_buffer.label();
    }
    return result;
}

AssemblerLabel align(int alignment)
{
    ASSERT(!(alignment & 3));
    while (!m_buffer.isAligned(alignment))
        brk(0);
    return label();
}
static void* getRelocatedAddress(void* code, AssemblerLabel label)
{
    ASSERT(label.isSet());
    return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}

static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
    return b.m_offset - a.m_offset;
}

void* unlinkedCode() { return m_buffer.data(); }
size_t codeSize() const { return m_buffer.codeSize(); }

static unsigned getCallReturnOffset(AssemblerLabel call)
{
    ASSERT(call.isSet());
    return call.m_offset;
}
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
// within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
}

void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
}

void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
{
    ASSERT(to.isSet());
    ASSERT(from.isSet());
    m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
}
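// The three linkJump overloads record the three compactable jump shapes as
// LinkRecords to be resolved at finalization time: a plain (possibly
// conditional) branch, a compare-and-branch against a register (cbz/cbnz
// style, with is64Bit selecting the register width), and a
// test-bit-and-branch (tbz/tbnz style, carrying the bit number to test).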
void linkJump(AssemblerLabel from, void* executableCode, AssemblerLabel to)
{
    ASSERT(from.isSet());
    ASSERT(to.isSet());
    relinkJumpOrCall<false>(addressOf(from), addressOf(executableCode, from), addressOf(to));
}

static void linkJump(void* code, AssemblerLabel from, void* to)
{
    ASSERT(from.isSet());
    relinkJumpOrCall<false>(addressOf(code, from), addressOf(code, from), to);
}

static void linkCall(void* code, AssemblerLabel from, void* to)
{
    ASSERT(from.isSet());
    linkJumpOrCall<true>(addressOf(code, from) - 1, addressOf(code, from) - 1, to);
}

static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
{
    linkPointer(addressOf(code, where), valuePtr);
}
static void replaceWithJump(void* where, void* to)
{
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);
    int insn = unconditionalBranchImmediate(false, static_cast<int>(offset));
    performJITMemcpy(where, &insn, sizeof(int));
    cacheFlush(where, sizeof(int));
}

static ptrdiff_t maxJumpReplacementSize()
{
    return 4;
}
static void replaceWithLoad(void* where)
{
    Datasize sf;
    AddOp op;
    SetFlags S;
    int shift;
    int imm12;
    RegisterID rn;
    RegisterID rd;
    if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
        ASSERT(sf == Datasize_64);
        ASSERT(op == AddOp_ADD);
        ASSERT(!S);
        ASSERT(!shift);
        ASSERT(!(imm12 & ~0xff8));
        int insn = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
        performJITMemcpy(where, &insn, sizeof(int));
        cacheFlush(where, sizeof(int));
    }
#if !ASSERT_DISABLED
    else {
        MemOpSize size;
        bool V;
        MemOp opc;
        int imm12;
        RegisterID rn;
        RegisterID rt;
        ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
        ASSERT(size == MemOpSize_64);
        ASSERT(!V);
        ASSERT(opc == MemOp_LOAD);
        ASSERT(!(imm12 & ~0x1ff));
    }
#endif
}
static void replaceWithAddressComputation(void* where)
{
    MemOpSize size;
    bool V;
    MemOp opc;
    int imm12;
    RegisterID rn;
    RegisterID rt;
    if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
        ASSERT(size == MemOpSize_64);
        ASSERT(!V);
        ASSERT(opc == MemOp_LOAD);
        ASSERT(!(imm12 & ~0x1ff));
        int insn = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
        performJITMemcpy(where, &insn, sizeof(int));
        cacheFlush(where, sizeof(int));
    }
#if !ASSERT_DISABLED
    else {
        Datasize sf;
        AddOp op;
        SetFlags S;
        int shift;
        int imm12;
        RegisterID rn;
        RegisterID rd;
        ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
        ASSERT(sf == Datasize_64);
        ASSERT(op == AddOp_ADD);
        ASSERT(!S);
        ASSERT(!shift);
        ASSERT(!(imm12 & ~0xff8));
    }
#endif
}
static void repatchPointer(void* where, void* valuePtr)
{
    linkPointer(static_cast<int*>(where), valuePtr, true);
}
static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
{
    uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
    int buffer[3];
    buffer[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
    buffer[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    buffer[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
    performJITMemcpy(address, buffer, sizeof(int) * 3);

    if (flush)
        cacheFlush(address, sizeof(int) * 3);
}
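// Pointers are materialized as a fixed movz/movk/movk triple, each carrying
// 16 bits, so a 48-bit address can later be repatched in place without
// resizing the code. For example, setting rd = x17 to 0x000012345678abcd
// emits the equivalent of (assembly shown for illustration only):
//
//     movz x17, #0xabcd
//     movk x17, #0x5678, lsl #16
//     movk x17, #0x1234, lsl #32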
static void repatchInt32(void* where, int32_t value)
{
    int* address = static_cast<int*>(where);

    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;
    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
    ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));

    int buffer[2];
    if (value >= 0) {
        buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
        buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    } else {
        buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
        buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
    }
    performJITMemcpy(where, &buffer, sizeof(int) * 2);

    cacheFlush(where, sizeof(int) * 2);
}
static void* readPointer(void* where)
{
    int* address = static_cast<int*>(where);

    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rdFirst, rd;

    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
    uintptr_t result = imm16;

    expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
    result |= static_cast<uintptr_t>(imm16) << 16;

    expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
    result |= static_cast<uintptr_t>(imm16) << 32;

    return reinterpret_cast<void*>(result);
}
static void* readCallTarget(void* from)
{
    return readPointer(reinterpret_cast<int*>(from) - 4);
}
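// readCallTarget walks back four instructions from the call's return address:
// the call sequence is the three-instruction pointer materialization above
// followed by a register branch, so the movz that begins the target address
// sits 4 instructions before the instruction after the branch.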
// The static relink, repatch, and replace methods can use |from| for both
// the write and executable address for call and jump patching as they're
// modifying existing (linked) code, so the address being provided is
// correct for relative address computation.
static void relinkJump(void* from, void* to)
{
    relinkJumpOrCall<false>(reinterpret_cast<int*>(from), reinterpret_cast<const int*>(from), to);
    cacheFlush(from, sizeof(int));
}

static void relinkCall(void* from, void* to)
{
    relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, reinterpret_cast<const int*>(from) - 1, to);
    cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
}
static void repatchCompact(void* where, int32_t value)
{
    ASSERT(!(value & ~0x3ff8));

    MemOpSize size;
    bool V;
    MemOp opc;
    int imm12;
    RegisterID rn;
    RegisterID rt;
    bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
    ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.

    if (size == MemOpSize_32)
        imm12 = encodePositiveImmediate<32>(value);
    else
        imm12 = encodePositiveImmediate<64>(value);
    int insn = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
    performJITMemcpy(where, &insn, sizeof(int));

    cacheFlush(where, sizeof(int));
}
unsigned debugOffset() { return m_buffer.debugOffset(); }

#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
{
    __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
}
#endif

static void cacheFlush(void* code, size_t size)
{
#if OS(IOS)
    sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
    size_t page = pageSize();
    uintptr_t current = reinterpret_cast<uintptr_t>(code);
    uintptr_t end = current + size;
    uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

    if (end <= firstPageEnd) {
        linuxPageFlush(current, end);
        return;
    }

    linuxPageFlush(current, firstPageEnd);

    for (current = firstPageEnd; current + page < end; current += page)
        linuxPageFlush(current, current + page);

    linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
}
// Assembler admin methods:

static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
    return a.from() < b.from();
}

static bool canCompact(JumpType jumpType)
{
    // Fixed jumps cannot be compacted
    return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
}
static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
    switch (jumpType) {
    case JumpFixed:
        return LinkInvalid;
    case JumpNoConditionFixedSize:
        return LinkJumpNoCondition;
    case JumpConditionFixedSize:
        return LinkJumpCondition;
    case JumpCompareAndBranchFixedSize:
        return LinkJumpCompareAndBranch;
    case JumpTestBitFixedSize:
        return LinkJumpTestBit;
    case JumpNoCondition:
        return LinkJumpNoCondition;
    case JumpCondition: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 43) >> 43) == relative)
            return LinkJumpConditionDirect;

        return LinkJumpCondition;
    }
    case JumpCompareAndBranch: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 43) >> 43) == relative)
            return LinkJumpCompareAndBranchDirect;

        return LinkJumpCompareAndBranch;
    }
    case JumpTestBit: {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
        intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));

        if (((relative << 50) >> 50) == relative)
            return LinkJumpTestBitDirect;

        return LinkJumpTestBit;
    }
    default:
        ASSERT_NOT_REACHED();
    }

    return LinkJumpNoCondition;
}
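// The shift pairs above are sign-extension range checks:
// ((relative << 43) >> 43) == relative holds exactly when the byte
// displacement fits in 21 signed bits - the 19-bit imm19 field scaled by
// the 4-byte instruction size - and the << 50 form is the corresponding
// (slightly conservative) check for the 14-bit imm14 field of tbz/tbnz.
// For example, relative = 1 << 21 does not survive the round trip, so such
// a jump is linked through the indirect (inverted branch plus b) form.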
static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
    JumpLinkType linkType = computeJumpType(record.type(), from, to);
    record.setLinkType(linkType);
    return linkType;
}

Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
    std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
    return m_jumpsToLink;
}
static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
{
    const int* fromInstruction = reinterpret_cast<const int*>(fromInstruction8);
    switch (record.linkType()) {
    case LinkJumpNoCondition:
        linkJumpOrCall<false>(reinterpret_cast<int*>(from), fromInstruction, to);
        break;
    case LinkJumpConditionDirect:
        linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), fromInstruction, to);
        break;
    case LinkJumpCondition:
        linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
        break;
    case LinkJumpCompareAndBranchDirect:
        linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
        break;
    case LinkJumpCompareAndBranch:
        linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
        break;
    case LinkJumpTestBitDirect:
        linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
        break;
    case LinkJumpTestBit:
        linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }
}
template<Datasize size>
static bool checkMovk(int insn, int _hw, RegisterID _rd)
{
    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;
    bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);

    return expected
        && sf == size
        && opc == MoveWideOp_K
        && hw == _hw
        && rd == _rd;
}

static void linkPointer(int* address, void* valuePtr, bool flush = false)
{
    Datasize sf;
    MoveWideOp opc;
    int hw;
    uint16_t imm16;
    RegisterID rd;
    bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
    ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
    ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
    ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));

    setPointer(address, valuePtr, rd, flush);
}
template<bool isCall>
static void linkJumpOrCall(int* from, const int* fromInstruction, void* to)
{
    bool link;
    int imm26;
    bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);

    ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
    ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);

    int insn = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
    performJITMemcpy(from, &insn, sizeof(int));
}
template<bool isDirect>
static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
        performJITMemcpy(from, &insn, sizeof(int));
        if (!isDirect) {
            insn = nopPseudo();
            performJITMemcpy(from + 1, &insn, sizeof(int));
        }
    } else {
        int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
        performJITMemcpy(from, &insn, sizeof(int));
        linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
    }
}
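// When a compare-and-branch target is out of cbz/cbnz range, the fallback
// above emits the branch with the inverted condition skipping the next
// instruction (offset 2), followed by an unconditional b with its far
// larger +/-128MB range; the direct case instead overwrites the second
// slot with a nop, so both forms occupy the same two instructions.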
template<bool isDirect>
static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        int insn = conditionalBranchImmediate(static_cast<int>(offset), condition);
        performJITMemcpy(from, &insn, sizeof(int));
        if (!isDirect) {
            insn = nopPseudo();
            performJITMemcpy(from + 1, &insn, sizeof(int));
        }
    } else {
        int insn = conditionalBranchImmediate(2, invert(condition));
        performJITMemcpy(from, &insn, sizeof(int));
        linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
    }
}
template<bool isDirect>
static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to)
{
    ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
    ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
    intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
    ASSERT(static_cast<int>(offset) == offset);
    ASSERT(((offset << 38) >> 38) == offset);

    bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
    ASSERT(!isDirect || useDirect);

    if (useDirect || isDirect) {
        int insn = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
        performJITMemcpy(from, &insn, sizeof(int));
        if (!isDirect) {
            insn = nopPseudo();
            performJITMemcpy(from + 1, &insn, sizeof(int));
        }
    } else {
        int insn = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
        performJITMemcpy(from, &insn, sizeof(int));
        linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
    }
}
template<bool isCall>
static void relinkJumpOrCall(int* from, const int* fromInstruction, void* to)
{
    if (!isCall && disassembleNop(from)) {
        unsigned op01;
        int imm19;
        Condition condition;
        bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);

        if (isConditionalBranchImmediate) {
            ASSERT_UNUSED(op01, !op01);
            ASSERT_UNUSED(isCall, !isCall);

            if (imm19 == 8)
                condition = invert(condition);

            linkConditionalBranch<false>(condition, from - 1, fromInstruction - 1, to);
            return;
        }

        Datasize opSize;
        bool op;
        RegisterID rt;
        bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);

        if (isCompareAndBranchImmediate) {
            if (imm19 == 8)
                op = !op;

            linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, fromInstruction - 1, to);
            return;
        }

        int imm14;
        unsigned bitNumber;
        bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);

        if (isTestAndBranchImmediate) {
            if (imm14 == 8)
                op = !op;

            linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, fromInstruction - 1, to);
            return;
        }
    }

    linkJumpOrCall<isCall>(from, fromInstruction, to);
}
static int* addressOf(void* code, AssemblerLabel label)
{
    return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
}

int* addressOf(AssemblerLabel label)
{
    return addressOf(m_buffer.data(), label);
}

static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    op = static_cast<AddOp>((insn >> 30) & 1);
    S = static_cast<SetFlags>((insn >> 29) & 1);
    shift = (insn >> 22) & 3;
    imm12 = (insn >> 10) & 0xfff; // imm12 is a 12-bit field, bits [21:10]; a 0x3ff mask would truncate it.
    rn = disassembleXOrSp((insn >> 5) & 0x1f);
    rd = disassembleXOrZrOrSp(S, insn & 0x1f);
    return (insn & 0x1f000000) == 0x11000000;
}
static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    size = static_cast<MemOpSize>((insn >> 30) & 3);
    V = (insn >> 26) & 1;
    opc = static_cast<MemOp>((insn >> 22) & 3);
    imm12 = (insn >> 10) & 0xfff;
    rn = disassembleXOrSp((insn >> 5) & 0x1f);
    rt = disassembleXOrZr(insn & 0x1f);
    return (insn & 0x3b000000) == 0x39000000;
}
static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    opc = static_cast<MoveWideOp>((insn >> 29) & 3);
    hw = (insn >> 21) & 3;
    imm16 = insn >> 5;
    rd = disassembleXOrZr(insn & 0x1f);
    return (insn & 0x1f800000) == 0x12800000;
}
static bool disassembleNop(void* address)
{
    unsigned insn = *static_cast<unsigned*>(address);
    return insn == 0xd503201f;
}

static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    sf = static_cast<Datasize>((insn >> 31) & 1);
    op = (insn >> 24) & 0x1;
    imm19 = (insn << 8) >> 13;
    rt = static_cast<RegisterID>(insn & 0x1f);
    return (insn & 0x7e000000) == 0x34000000;
}
static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition& condition)
{
    int insn = *static_cast<int*>(address);
    op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
    imm19 = (insn << 8) >> 13;
    condition = static_cast<Condition>(insn & 0xf);
    return (insn & 0xfe000000) == 0x54000000;
}

static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
{
    int insn = *static_cast<int*>(address);
    op = (insn >> 24) & 0x1;
    imm14 = (insn << 13) >> 18;
    bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
    rt = static_cast<RegisterID>(insn & 0x1f);
    return (insn & 0x7e000000) == 0x36000000;
}
static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
{
    int insn = *static_cast<int*>(address);
    op = (insn >> 31) & 1;
    imm26 = (insn << 6) >> 6;
    return (insn & 0x7c000000) == 0x14000000;
}
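// The disassemble* helpers all decode immediates with the shift-pair idiom:
// for example, imm19 = (insn << 8) >> 13 first left-aligns the 19-bit field
// (bits [23:5]) at the top of the word, then arithmetic-shifts it back down,
// extracting and sign-extending it in one step. The final mask-and-compare
// tests the fixed opcode bits, so callers can use the return value to
// confirm they are looking at the expected instruction.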
static int xOrSp(RegisterID reg)
{
    ASSERT(!isZr(reg));
    ASSERT(!isIOS() || reg != ARM64Registers::x18);
    return reg;
}
static int xOrZr(RegisterID reg)
{
    ASSERT(!isSp(reg));
    ASSERT(!isIOS() || reg != ARM64Registers::x18);
    return reg & 31;
}
static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }

ALWAYS_INLINE void insn(int instruction)
{
    m_buffer.putInt(instruction);
}
ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
{
    ASSERT(imm3 < 5);
    // The only allocated value for opt is 0.
    const int opt = 0;
    return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
}
ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
{
    ASSERT(shift < 2);
    ASSERT(isUInt12(imm12));
    return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
}
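// Field layout for the add/subtract-immediate encoding above, mirroring the
// bit positions used in the expression: sf at bit 31 selects 32/64-bit
// operation, op (add/sub) at 30, S (set flags) at 29, shift at 22 (0: imm12
// used as-is, 1: imm12 shifted left by 12), imm12 at bits [21:10], rn at
// [9:5], and rd at [4:0].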
ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
{
    ASSERT(shift < 3);
    ASSERT(!(imm6 & (sf ? ~63 : ~31)));
    return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
{
    const int opcode2 = 0;
    return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
{
    ASSERT(immr < (sf ? 64 : 32));
    ASSERT(imms < (sf ? 64 : 32));
    const int N = sf;
    return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

// 'op' means negate
ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
{
    ASSERT(imm19 == (imm19 << 13) >> 13);
    return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
}
ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
{
    ASSERT(imm19 == (imm19 << 13) >> 13);
    ASSERT(!(cond & ~15));
    // The only allocated values for o1 & o0 are 0.
    const int o1 = 0;
    const int o0 = 0;
    return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
}

ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
{
    ASSERT(!(imm5 & ~0x1f));
    ASSERT(nzcv < 16);
    const int S = 1;
    const int o2 = 0;
    const int o3 = 0;
    return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
}

ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
{
    ASSERT(nzcv < 16);
    const int S = 1;
    const int o2 = 0;
    const int o3 = 0;
    return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
}
// 'op' means negate
// 'op2' means increment
ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    const int opcode2 = 0;
    return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
{
    const int S = 0;
    return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
{
    int op54 = opcode >> 4;
    int op31 = (opcode >> 1) & 7;
    int op0 = opcode & 1;
    return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}

ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
{
    ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
    const int op2 = 0;
    return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
}

ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
{
    ASSERT(imms < (sf ? 64 : 32));
    const int op21 = 0;
    const int N = sf;
    const int o0 = 0;
    return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
}
ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
{
    const int M = 0;
    const int S = 0;
    const int op = 0;
    return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
}

ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
{
    ASSERT(!(nzcv & ~0xf));
    const int M = 0;
    const int S = 0;
    return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
}

ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    const int imm5 = 0;
    return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
}
ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
{
    const int S = 0;
    return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
}

ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
{
    return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
}

ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
}

ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
{
    const int M = 0;
    const int S = 0;
    return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
}
ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, unsigned size, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd)
{
    const int Q = 0;
    return (0xe201c00 | Q << 30 | size << 22 | vm << 16 | opcode << 11 | vn << 5 | vd);
}

ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd)
{
    return vectorDataProcessing2Source(opcode, 0, vm, vn, vd);
}