2 * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
32 #include "AssemblerBuffer.h"
33 #include "AssemblerCommon.h"
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
41 namespace ARMRegisters {
43 #define FOR_EACH_CPU_REGISTER(V) \
44 FOR_EACH_CPU_GPREGISTER(V) \
45 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
46 FOR_EACH_CPU_FPREGISTER(V)
48 // The following are defined as pairs of the following value:
49 // 1. type of the storage needed to save the register value by the JIT probe.
50 // 2. name of the register.
51 #define FOR_EACH_CPU_GPREGISTER(V) \
69 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
73 #define FOR_EACH_CPU_FPREGISTER(V) \
108 #define DECLARE_REGISTER(_type, _regName) _regName,
109 FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
110 #undef DECLARE_REGISTER
112 fp = r7, // frame pointer
113 sb = r9, // static base
114 sl = r10, // stack limit
154 } FPSingleRegisterID;
157 #define DECLARE_REGISTER(_type, _regName) _regName,
158 FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
159 #undef DECLARE_REGISTER
160 } FPDoubleRegisterID;
// Maps a double-precision VFP register to its first overlapping
// single-precision alias (d<n> overlays s<2n> and s<2n+1>), hence << 1.
// NOTE(review): this extract has dropped the function braces (and possibly
// a range ASSERT); verify against the full source.
197 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
200 return (FPSingleRegisterID)(reg << 1);
// Inverse of asSingle: maps a single-precision register to the
// double-precision register that contains it (s<2n>/s<2n+1> -> d<n>).
// NOTE(review): function braces elided in this extract.
203 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
206 return (FPDoubleRegisterID)(reg >> 1);
209 } // namespace ARMRegisters
211 class ARMv7Assembler;
// ARMThumbImmediate models the immediate-operand forms accepted by Thumb-2
// instructions: an "encoded immediate" (ARM modified-immediate constant:
// shifted byte or repeated-byte pattern), a plain 16-bit value, or invalid.
212 class ARMThumbImmediate {
213 friend class ARMv7Assembler;
// Discriminator for which representation m_value holds.
215 typedef uint8_t ThumbImmediateType;
216 static const ThumbImmediateType TypeInvalid = 0;
217 static const ThumbImmediateType TypeEncoded = 1;
218 static const ThumbImmediateType TypeUInt16 = 2;
228 // If this is an encoded immediate, then it may describe a shift, or a pattern.
// NOTE(review): the enclosing union/struct declarations for these bit-fields
// are elided in this extract. shiftValue7 holds the shifted byte minus its
// implicit top bit; shiftAmount is the magnitude of a right rotate (see
// makeEncodedImm below, which writes these fields).
230 unsigned shiftValue7 : 7;
231 unsigned shiftAmount : 5;
// Pattern form: 'immediate' is the repeated byte; 'pattern' selects which
// byte lanes it is replicated into (see patterns 1/2/3 in makeEncodedImm).
234 unsigned immediate : 8;
235 unsigned pattern : 4;
237 } ThumbImmediateValue;
239 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
250 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
252 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
253 value >>= N; /* if any were set, lose the bottom N */
254 else /* if none of the top N bits are set, */
255 zeros += N; /* then we have identified N leading zeros */
258 static int32_t countLeadingZeros(uint32_t value)
264 countLeadingZerosPartial(value, zeros, 16);
265 countLeadingZerosPartial(value, zeros, 8);
266 countLeadingZerosPartial(value, zeros, 4);
267 countLeadingZerosPartial(value, zeros, 2);
268 countLeadingZerosPartial(value, zeros, 1);
// Default constructor: produces an invalid immediate (isValid() == false),
// the failure value returned by the make* factories below.
// NOTE(review): constructor bodies/braces are elided in this extract.
273 : m_type(TypeInvalid)
// Private constructor used by makeEncodedImm: stores an already-encoded
// modified-immediate value.
278 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
// Private constructor used by makeUInt12/makeUInt16: stores a plain 16-bit
// value.
284 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
287 // Make sure this constructor is only reached with type TypeUInt16;
288 // this extra parameter makes the code a little clearer by making it
289 // explicit at call sites which type is being constructed
290 ASSERT_UNUSED(type, type == TypeUInt16);
292 m_value.asInt = value;
// Attempts to encode an arbitrary 32-bit value as an ARM "modified immediate
// constant" (shifted byte, or a repeated-byte pattern). Returns an invalid
// immediate if the value is not representable.
// NOTE(review): this extract has elided several lines, including the
// small-value guard (presumably 'if (value < 256)'), the byte-union
// declaration backing 'bytes', and closing braces — verify against the
// full source.
296 static ARMThumbImmediate makeEncodedImm(uint32_t value)
298 ThumbImmediateValue encoding;
301 // okay, these are easy.
// Pattern 0: value fits in one byte, no shift or replication needed.
303 encoding.immediate = value;
304 encoding.pattern = 0;
305 return ARMThumbImmediate(TypeEncoded, encoding);
308 int32_t leadingZeros = countLeadingZeros(value);
309 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
310 ASSERT(leadingZeros < 24);
312 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
313 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
314 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
315 int32_t rightShiftAmount = 24 - leadingZeros;
// Shifted-byte form: representable iff no set bits below the 8-bit window.
316 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
317 // Shift the value down to the low byte position. The assign to
318 // shiftValue7 drops the implicit top bit.
319 encoding.shiftValue7 = value >> rightShiftAmount;
320 // The encoded shift amount is the magnitude of a right rotate.
321 encoding.shiftAmount = 8 + leadingZeros;
322 return ARMThumbImmediate(TypeEncoded, encoding);
// Pattern 3: the same byte replicated into all four byte lanes.
328 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
329 encoding.immediate = bytes.byte0;
330 encoding.pattern = 3;
331 return ARMThumbImmediate(TypeEncoded, encoding);
// Pattern 1: byte replicated into the even lanes, odd lanes zero.
334 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
335 encoding.immediate = bytes.byte0;
336 encoding.pattern = 1;
337 return ARMThumbImmediate(TypeEncoded, encoding);
// Pattern 2: byte replicated into the odd lanes, even lanes zero.
340 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
341 encoding.immediate = bytes.byte1;
342 encoding.pattern = 2;
343 return ARMThumbImmediate(TypeEncoded, encoding);
// Not representable as a modified immediate.
346 return ARMThumbImmediate();
349 static ARMThumbImmediate makeUInt12(int32_t value)
351 return (!(value & 0xfffff000))
352 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
353 : ARMThumbImmediate();
356 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
358 // If this is not a 12-bit unsigned it, try making an encoded immediate.
359 return (!(value & 0xfffff000))
360 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
361 : makeEncodedImm(value);
364 // The 'make' methods, above, return a !isValid() value if the argument
365 // cannot be represented as the requested type. This methods is called
366 // 'get' since the argument can always be represented.
367 static ARMThumbImmediate makeUInt16(uint16_t value)
369 return ARMThumbImmediate(TypeUInt16, value);
// --- Validity, width predicates and checked accessors --------------------
// NOTE(review): the isValid() signature and surrounding braces are elided in
// this extract; only the statement body is visible.
374 return m_type != TypeInvalid;
// Raw 16-bit view of the stored value.
377 uint16_t asUInt16() const { return m_value.asInt; }
379 // These methods rely on the format of encoded byte values.
// isUInt3..isUInt8: true when no bits above the requested width are set.
380 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
381 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
382 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
383 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
384 bool isUInt7() { return !(m_value.asInt & 0xff80); }
385 bool isUInt8() { return !(m_value.asInt & 0xff00); }
// Widths of 9+ bits additionally require m_type == TypeUInt16, so an
// encoded immediate's bit-field layout is never misread as a plain value.
386 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
387 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
388 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
389 bool isUInt16() { return m_type == TypeUInt16; }
// getUIntN: checked accessors — each asserts the matching isUIntN predicate
// before returning the (implicitly narrowed) value.
390 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
391 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
392 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
393 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
394 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
395 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
396 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
397 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
398 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
399 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
// True when built by makeEncodedImm (ARM modified-immediate constant).
401 bool isEncodedImm() { return m_type == TypeEncoded; }
// Discriminator plus payload for the immediate.
404 ThumbImmediateType m_type;
405 ThumbImmediateValue m_value;
// NOTE(review): enum fragment — RRX shares the encoding of ROR (presumably
// distinguished by a zero shift amount); the enclosing enum declaration is
// elided in this extract.
414 SRType_RRX = SRType_ROR
// Packs a shift type and a 5-bit shift amount for register-shifted operands;
// lo4()/hi4() expose the two 4-bit halves used by the instruction encodings.
417 class ShiftTypeAndAmount {
418 friend class ARMv7Assembler;
// NOTE(review): constructor bodies/braces and the m_u union declaration are
// elided in this extract.
423 m_u.type = (ARMShiftType)0;
427 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
// Shift amounts are 5 bits; mask keeps the stored amount in range.
430 m_u.amount = amount & 31;
433 unsigned lo4() { return m_u.lo4; }
434 unsigned hi4() { return m_u.hi4; }
// Emits ARMv7 Thumb-2 machine code into an AssemblerBuffer. Double-precision
// VFP registers are the canonical FP register type.
449 class ARMv7Assembler {
451 typedef ARMRegisters::RegisterID RegisterID;
452 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
453 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
454 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
455 typedef FPDoubleRegisterID FPRegisterID;
// Bounds of the allocatable register ranges (r0..r13, d0..d31).
457 static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
458 static constexpr RegisterID lastRegister() { return ARMRegisters::r13; }
460 static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
461 static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
// ARM condition codes in encoding order (values 0..14). The comments give
// the x86-style aliases. NOTE(review): the enum's opening declaration line
// is elided in this extract.
463 // (HS, LO, HI, LS) -> (AE, B, A, BE)
464 // (VS, VC) -> (O, NO)
466 ConditionEQ, // Zero / Equal.
467 ConditionNE, // Non-zero / Not equal.
468 ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
469 ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
470 ConditionMI, // Negative.
471 ConditionPL, // Positive or zero.
472 ConditionVS, // Overflowed.
473 ConditionVC, // Not overflowed.
474 ConditionHI, // Unsigned higher.
475 ConditionLS, // Unsigned lower or same.
476 ConditionGE, // Signed greater than or equal.
477 ConditionLT, // Signed less than.
478 ConditionGT, // Signed greater than.
479 ConditionLE, // Signed less than or equal.
480 ConditionAL, // Unconditional / Always execute.
// Jump enums pack two facts into one value: the low 3 bits are an index,
// the remaining bits the maximum patchable size in bytes of that jump form.
484 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
485 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
// High-level jump categories; *FixedSize variants reserve the full width
// so they can be repatched in place.
486 enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
487 JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
488 JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
489 JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
490 JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
// Concrete link encodings chosen at link time (T1..T4 branch forms, BX
// sequences). NOTE(review): the JumpLinkType enum's opening declaration is
// elided in this extract.
493 LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
494 LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
495 LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
496 LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
497 LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
498 LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
499 LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
500 LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
// Records one unresolved jump: source/destination offsets, the jump's
// category, the link encoding chosen later, and its condition code.
// NOTE(review): the class declaration, union declaration and several braces
// are elided in this extract.
505 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
507 data.realTypes.m_from = from;
508 data.realTypes.m_to = to;
509 data.realTypes.m_type = type;
// Link type starts invalid; it is assigned exactly once via setLinkType().
510 data.realTypes.m_linkType = LinkInvalid;
511 data.realTypes.m_condition = condition;
// Assignment copies the raw words rather than the bit-fields — relies on
// the COMPILE_ASSERT below that both union views have the same size.
513 void operator=(const LinkRecord& other)
515 data.copyTypes.content[0] = other.data.copyTypes.content[0];
516 data.copyTypes.content[1] = other.data.copyTypes.content[1];
517 data.copyTypes.content[2] = other.data.copyTypes.content[2];
519 intptr_t from() const { return data.realTypes.m_from; }
520 void setFrom(intptr_t from) { data.realTypes.m_from = from; }
521 intptr_t to() const { return data.realTypes.m_to; }
522 JumpType type() const { return data.realTypes.m_type; }
523 JumpLinkType linkType() const { return data.realTypes.m_linkType; }
524 void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
525 Condition condition() const { return data.realTypes.m_condition; }
// Bit-field layout of the "real" view (fields between these are elided in
// this extract).
529 intptr_t m_from : 31;
532 JumpLinkType m_linkType : 8;
533 Condition m_condition : 16;
538 COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
// Constructor initializer-list fragment: watchpoint indices start at
// INT_MIN, i.e. "no watchpoint emitted yet".
543 : m_indexOfLastWatchpoint(INT_MIN)
544 , m_indexOfTailOfLastWatchpoint(INT_MIN)
// Direct access to the underlying code buffer.
548 AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
// sp and pc are disallowed as operands by many T32 encodings; helpers
// assert !BadReg(...) on the registers those encodings restrict.
// NOTE(review): braces elided in this extract.
553 static bool BadReg(RegisterID reg)
555 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
// Splits a single-precision register number into the 4-bit field (upper
// bits) and the 1-bit field (low bit) an encoding expects.
// NOTE(review): the guarding 'if' on the low bit and the return statement
// appear elided in this extract — verify against the full source.
558 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
560 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
562 rdMask |= 1 << lowBitShift;
// Double-precision counterpart: 4 low bits plus a single top bit.
// NOTE(review): the guard on bit 4 and the return appear elided here too.
566 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
568 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
570 rdMask |= 1 << highBitShift;
575 OP_ADD_reg_T1 = 0x1800,
576 OP_SUB_reg_T1 = 0x1A00,
577 OP_ADD_imm_T1 = 0x1C00,
578 OP_SUB_imm_T1 = 0x1E00,
579 OP_MOV_imm_T1 = 0x2000,
580 OP_CMP_imm_T1 = 0x2800,
581 OP_ADD_imm_T2 = 0x3000,
582 OP_SUB_imm_T2 = 0x3800,
583 OP_AND_reg_T1 = 0x4000,
584 OP_EOR_reg_T1 = 0x4040,
585 OP_TST_reg_T1 = 0x4200,
586 OP_RSB_imm_T1 = 0x4240,
587 OP_CMP_reg_T1 = 0x4280,
588 OP_ORR_reg_T1 = 0x4300,
589 OP_MVN_reg_T1 = 0x43C0,
590 OP_ADD_reg_T2 = 0x4400,
591 OP_MOV_reg_T1 = 0x4600,
594 OP_STR_reg_T1 = 0x5000,
595 OP_STRH_reg_T1 = 0x5200,
596 OP_STRB_reg_T1 = 0x5400,
597 OP_LDRSB_reg_T1 = 0x5600,
598 OP_LDR_reg_T1 = 0x5800,
599 OP_LDRH_reg_T1 = 0x5A00,
600 OP_LDRB_reg_T1 = 0x5C00,
601 OP_LDRSH_reg_T1 = 0x5E00,
602 OP_STR_imm_T1 = 0x6000,
603 OP_LDR_imm_T1 = 0x6800,
604 OP_STRB_imm_T1 = 0x7000,
605 OP_LDRB_imm_T1 = 0x7800,
606 OP_STRH_imm_T1 = 0x8000,
607 OP_LDRH_imm_T1 = 0x8800,
608 OP_STR_imm_T2 = 0x9000,
609 OP_LDR_imm_T2 = 0x9800,
610 OP_ADD_SP_imm_T1 = 0xA800,
611 OP_ADD_SP_imm_T2 = 0xB000,
612 OP_SUB_SP_imm_T1 = 0xB080,
625 OP_AND_reg_T2 = 0xEA00,
626 OP_TST_reg_T2 = 0xEA10,
627 OP_ORR_reg_T2 = 0xEA40,
628 OP_ORR_S_reg_T2 = 0xEA50,
629 OP_ASR_imm_T1 = 0xEA4F,
630 OP_LSL_imm_T1 = 0xEA4F,
631 OP_LSR_imm_T1 = 0xEA4F,
632 OP_ROR_imm_T1 = 0xEA4F,
633 OP_MVN_reg_T2 = 0xEA6F,
634 OP_EOR_reg_T2 = 0xEA80,
635 OP_ADD_reg_T3 = 0xEB00,
636 OP_ADD_S_reg_T3 = 0xEB10,
637 OP_SUB_reg_T2 = 0xEBA0,
638 OP_SUB_S_reg_T2 = 0xEBB0,
639 OP_CMP_reg_T2 = 0xEBB0,
640 OP_VMOV_CtoD = 0xEC00,
641 OP_VMOV_DtoC = 0xEC10,
646 OP_VMOV_CtoS = 0xEE00,
647 OP_VMOV_StoC = 0xEE10,
654 OP_VCVT_FPIVFP = 0xEEB0,
656 OP_VMOV_IMM_T2 = 0xEEB0,
659 OP_VSQRT_T1 = 0xEEB0,
660 OP_VCVTSD_T1 = 0xEEB0,
661 OP_VCVTDS_T1 = 0xEEB0,
664 OP_AND_imm_T1 = 0xF000,
666 OP_ORR_imm_T1 = 0xF040,
667 OP_MOV_imm_T2 = 0xF040,
669 OP_EOR_imm_T1 = 0xF080,
670 OP_ADD_imm_T3 = 0xF100,
671 OP_ADD_S_imm_T3 = 0xF110,
674 OP_SUB_imm_T3 = 0xF1A0,
675 OP_SUB_S_imm_T3 = 0xF1B0,
676 OP_CMP_imm_T2 = 0xF1B0,
677 OP_RSB_imm_T2 = 0xF1C0,
678 OP_RSB_S_imm_T2 = 0xF1D0,
679 OP_ADD_imm_T4 = 0xF200,
680 OP_MOV_imm_T3 = 0xF240,
681 OP_SUB_imm_T4 = 0xF2A0,
685 OP_DMB_SY_T2a = 0xF3BF,
686 OP_STRB_imm_T3 = 0xF800,
687 OP_STRB_reg_T2 = 0xF800,
688 OP_LDRB_imm_T3 = 0xF810,
689 OP_LDRB_reg_T2 = 0xF810,
690 OP_STRH_imm_T3 = 0xF820,
691 OP_STRH_reg_T2 = 0xF820,
692 OP_LDRH_reg_T2 = 0xF830,
693 OP_LDRH_imm_T3 = 0xF830,
694 OP_STR_imm_T4 = 0xF840,
695 OP_STR_reg_T2 = 0xF840,
696 OP_LDR_imm_T4 = 0xF850,
697 OP_LDR_reg_T2 = 0xF850,
698 OP_STRB_imm_T2 = 0xF880,
699 OP_LDRB_imm_T2 = 0xF890,
700 OP_STRH_imm_T2 = 0xF8A0,
701 OP_LDRH_imm_T2 = 0xF8B0,
702 OP_STR_imm_T3 = 0xF8C0,
703 OP_LDR_imm_T3 = 0xF8D0,
704 OP_LDRSB_reg_T2 = 0xF910,
705 OP_LDRSH_reg_T2 = 0xF930,
706 OP_LSL_reg_T2 = 0xFA00,
707 OP_LSR_reg_T2 = 0xFA20,
708 OP_ASR_reg_T2 = 0xFA40,
709 OP_ROR_reg_T2 = 0xFA60,
711 OP_SMULL_T1 = 0xFB80,
712 #if HAVE(ARM_IDIV_INSTRUCTIONS)
719 OP_VADD_T2b = 0x0A00,
723 OP_VMOV_IMM_T2b = 0x0A00,
724 OP_VMOV_T2b = 0x0A40,
725 OP_VMUL_T2b = 0x0A00,
728 OP_VMOV_StoCb = 0x0A10,
729 OP_VMOV_CtoSb = 0x0A10,
730 OP_VMOV_DtoCb = 0x0A10,
731 OP_VMOV_CtoDb = 0x0A10,
733 OP_VABS_T2b = 0x0A40,
735 OP_VCVT_FPIVFPb = 0x0A40,
736 OP_VNEG_T2b = 0x0A40,
737 OP_VSUB_T2b = 0x0A40,
738 OP_VSQRT_T1b = 0x0A40,
739 OP_VCVTSD_T1b = 0x0A40,
740 OP_VCVTDS_T1b = 0x0A40,
742 OP_DMB_SY_T2b = 0x8F5F,
// Packs four 4-bit fields (f3..f0) for the second halfword of 32-bit
// Thumb-2 encodings. NOTE(review): the enclosing struct declaration and the
// constructor body are elided in this extract.
748 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
767 class ARMInstructionFormatter;
770 static bool ifThenElseConditionBit(Condition condition, bool isIf)
772 return isIf ? (condition & 1) : !(condition & 1);
// Builds the (condition << 4) | mask byte of an IT instruction for 4-, 3-,
// 2- and 1-instruction blocks. The mask encodes the T/E pattern plus a
// terminating 1 bit whose position marks the block length.
// NOTE(review): in this extract the trailing mask terms (including the
// final '| 1;' of each expression) and braces appear elided — verify
// against the full source.
774 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
776 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
777 | (ifThenElseConditionBit(condition, inst3if) << 2)
778 | (ifThenElseConditionBit(condition, inst4if) << 1)
// An AL condition cannot take 'else' slots: the mask must have one bit set.
780 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
781 return (condition << 4) | mask;
783 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
785 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
786 | (ifThenElseConditionBit(condition, inst3if) << 2)
788 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
789 return (condition << 4) | mask;
791 static uint8_t ifThenElse(Condition condition, bool inst2if)
793 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
795 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
796 return (condition << 4) | mask;
// Single-instruction IT: the mask declaration is elided in this extract.
799 static uint8_t ifThenElse(Condition condition)
802 return (condition << 4) | mask;
// ADC (add with carry) register + encoded immediate. Emits the 32-bit T1
// encoding. NOTE(review): braces elided throughout this extract.
807 void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
809 // Rd can only be SP if Rn is also SP.
810 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
811 ASSERT(rd != ARMRegisters::pc);
812 ASSERT(rn != ARMRegisters::pc);
813 ASSERT(imm.isEncodedImm());
815 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
// ADD register + immediate. Prefers the shortest encoding: SP-relative T1/T2
// forms, then 16-bit T1/T2, then 32-bit T3 (encoded imm) or T4 (12-bit imm).
// NOTE(review): the 'return;' after each short-form emit, the isUInt3()
// guard before the T1 form, and several braces appear elided here.
818 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
820 // Rd can only be SP if Rn is also SP.
821 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
822 ASSERT(rd != ARMRegisters::pc);
823 ASSERT(rn != ARMRegisters::pc);
824 ASSERT(imm.isValid());
// SP-relative adds encode the offset in words, hence the alignment check
// and the '>> 2' below.
826 if (rn == ARMRegisters::sp && imm.isUInt16()) {
827 ASSERT(!(imm.getUInt16() & 3));
828 if (!(rd & 8) && imm.isUInt10()) {
829 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
831 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
832 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
// 16-bit forms require low registers (number < 8).
835 } else if (!((rd | rn) & 8)) {
837 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
839 } else if ((rd == rn) && imm.isUInt8()) {
840 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
// 32-bit fallback: T3 takes a modified immediate, T4 a plain 12-bit one.
845 if (imm.isEncodedImm())
846 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
848 ASSERT(imm.isUInt12());
849 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
// ADD register + shifted register (32-bit T3 encoding).
// NOTE(review): braces and some guard/return lines are elided throughout
// this extract.
853 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
855 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
856 ASSERT(rd != ARMRegisters::pc);
857 ASSERT(rn != ARMRegisters::pc);
859 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
862 // NOTE: In an IT block, add doesn't modify the flags register.
// Plain register ADD: prefers the 16-bit T2 form (rd matching rn or rm),
// then T1 for low registers, else falls back to the 32-bit shifted form.
// NOTE(review): the conditionals selecting between the two T2 emits (the
// rd==rn / rd==rm cases) appear elided here.
863 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
865 if (rd == ARMRegisters::sp) {
871 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
873 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
874 else if (!((rd | rn | rm) & 8))
875 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
877 add(rd, rn, rm, ShiftTypeAndAmount());
880 // Not allowed in an IT (if then) block.
// Flag-setting ADD with immediate: 16-bit forms set flags outside an IT
// block; otherwise the explicit S-suffixed 32-bit T3 form is used.
// NOTE(review): the isUInt3() guard and per-branch 'return;' lines appear
// elided here.
881 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
883 // Rd can only be SP if Rn is also SP.
884 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
885 ASSERT(rd != ARMRegisters::pc);
886 ASSERT(rn != ARMRegisters::pc);
887 ASSERT(imm.isEncodedImm());
889 if (!((rd | rn) & 8)) {
891 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
893 } else if ((rd == rn) && imm.isUInt8()) {
894 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
899 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
902 // Not allowed in an IT (if then) block?
// Flag-setting ADD with shifted register (32-bit S-suffixed T3 form).
903 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
905 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
906 ASSERT(rd != ARMRegisters::pc);
907 ASSERT(rn != ARMRegisters::pc);
909 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
912 // Not allowed in an IT (if then) block.
// Flag-setting register ADD: 16-bit T1 for low registers, else 32-bit.
913 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
915 if (!((rd | rn | rm) & 8))
916 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
918 add_S(rd, rn, rm, ShiftTypeAndAmount());
// Bitwise AND ('ARM_and' because 'and' is a reserved C++ alternative
// token) with encoded immediate. NOTE(review): braces and some ASSERT
// lines are elided throughout this extract.
921 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
925 ASSERT(imm.isEncodedImm());
926 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
// AND with shifted register (32-bit T2 form).
929 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
934 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Register AND: the 16-bit T1 form needs rd to equal one operand and all
// registers low; AND is commutative, so operands may be swapped to fit.
937 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
939 if ((rd == rn) && !((rd | rm) & 8))
940 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
941 else if ((rd == rm) && !((rd | rn) & 8))
942 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
944 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
// Arithmetic shift right by immediate (encoded as a MOV-shifted form).
947 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
951 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
952 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Arithmetic shift right by register amount.
955 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
960 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Unconditional branch (32-bit T4 encoding); returns a label at the emitted
// instruction so it can be linked later.
// NOTE(review): braces elided throughout this extract.
963 // Only allowed in IT (if then) block if last instruction.
964 ALWAYS_INLINE AssemblerLabel b()
966 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
967 return m_formatter.label();
// Branch with link and exchange to a register target.
970 // Only allowed in IT (if then) block if last instruction.
971 ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
973 ASSERT(rm != ARMRegisters::pc);
974 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
975 return m_formatter.label();
// Branch and exchange to a register target.
978 // Only allowed in IT (if then) block if last instruction.
979 ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
981 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
982 return m_formatter.label();
// Software breakpoint with an 8-bit immediate payload.
985 void bkpt(uint8_t imm = 0)
987 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
// Count leading zeros instruction; rm is encoded in both Rm fields.
990 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
994 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm))
// Compare-negative with encoded immediate; Rd field is 0xf (no destination).
// NOTE(review): braces elided throughout this extract.
997 ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
999 ASSERT(rn != ARMRegisters::pc);
1000 ASSERT(imm.isEncodedImm());
1002 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
// Compare with immediate: 16-bit T1 for a low register and 8-bit value,
// otherwise the 32-bit T2 form.
1005 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
1007 ASSERT(rn != ARMRegisters::pc);
1008 ASSERT(imm.isEncodedImm());
1010 if (!(rn & 8) && imm.isUInt8())
1011 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
1013 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
// Compare with shifted register.
1016 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1018 ASSERT(rn != ARMRegisters::pc);
1019 ASSERT(!BadReg(rm));
1020 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
// Register compare: 16-bit T1 when both registers are low, else the
// shifted form. NOTE(review): the selecting conditional appears elided in
// this extract — verify which branch guards which emit.
1023 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
1026 cmp(rn, rm, ShiftTypeAndAmount());
1028 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
// Exclusive-or with encoded immediate.
// NOTE(review): braces elided throughout this extract.
1031 // xor is not spelled with an 'e'. :-(
1032 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1034 ASSERT(!BadReg(rd));
1035 ASSERT(!BadReg(rn));
1036 ASSERT(imm.isEncodedImm());
1037 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
// Exclusive-or with shifted register (32-bit T2 form).
1040 // xor is not spelled with an 'e'. :-(
1041 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1043 ASSERT(!BadReg(rd));
1044 ASSERT(!BadReg(rn));
1045 ASSERT(!BadReg(rm));
1046 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Register EOR: the 16-bit T1 form needs rd to equal one operand and all
// registers low; EOR is commutative, so operands may be swapped to fit.
1049 // xor is not spelled with an 'e'. :-(
1050 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
1052 if ((rd == rn) && !((rd | rm) & 8))
1053 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1054 else if ((rd == rm) && !((rd | rn) & 8))
1055 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1057 eor(rd, rn, rm, ShiftTypeAndAmount());
// IT (if-then) instruction emitters for 1- to 4-instruction blocks; the
// bool flags say whether each subsequent slot is 'then' or 'else'.
1060 ALWAYS_INLINE void it(Condition cond)
1062 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
1065 ALWAYS_INLINE void it(Condition cond, bool inst2if)
1067 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
1070 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
1072 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
1075 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
1077 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
// Word load with unsigned 12-bit immediate offset: prefers the 16-bit T1
// (low regs, word-aligned 7-bit offset) or SP-relative T2 forms, else the
// 32-bit T3 form. Offsets are encoded in words, hence the '>> 2'.
// NOTE(review): braces elided throughout this extract.
1080 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1081 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1083 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1084 ASSERT(imm.isUInt12());
1086 if (!((rt | rn) & 8) && imm.isUInt7())
1087 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt)
1088 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1089 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1091 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
// Always emits the full-width T3 form even for small offsets — used where
// the instruction must later be repatched to a wider offset.
1094 ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
1096 ASSERT(rn != ARMRegisters::pc);
1097 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
// Always emits the compact 16-bit T1 form; caller guarantees low registers
// and a word-aligned 7-bit offset.
1100 ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1102 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1103 ASSERT(imm.isUInt7());
1104 ASSERT(!((rt | rn) & 8));
1105 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1108 // If index is set, this is a regular offset or a pre-indexed load;
1109 // if index is not set then is is a post-index load.
1111 // If wback is set rn is updated - this is a pre or post index load,
1112 // if wback is not set this is a regular offset memory access.
1114 // (-255 <= offset <= 255)
1116 // _tmp = _reg + offset
1117 // MEM[index ? _tmp : _reg] = REG[rt]
1118 // if (wback) REG[rn] = _tmp
// Indexed/writeback word load (T4 form). The 12-bit field packs the 8-bit
// offset plus W/U/P control bits.
// NOTE(review): the 'bool add' computation handling negative offsets
// appears elided in this extract — verify against the full source.
1119 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1121 ASSERT(rt != ARMRegisters::pc);
1122 ASSERT(rn != ARMRegisters::pc);
1123 ASSERT(index || wback);
1124 ASSERT(!wback | (rt != rn));
1131 ASSERT((offset & ~0xff) == 0);
1133 offset |= (wback << 8);
1134 offset |= (add << 9);
1135 offset |= (index << 10);
1136 offset |= (1 << 11);
1138 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
// Word load with (optionally shifted) register offset: 16-bit T1 for low
// registers and no shift, else 32-bit T2.
// NOTE(review): braces and shift-range ASSERTs elided throughout this
// extract.
1141 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1142 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1144 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1145 ASSERT(!BadReg(rm));
1148 if (!shift && !((rt | rn | rm) & 8))
1149 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1151 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Halfword load with immediate offset: offsets are halfword-aligned, so
// the 16-bit T1 form encodes 'offset >> 1'.
1154 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1155 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1157 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1158 ASSERT(imm.isUInt12());
1159 ASSERT(!(imm.getUInt12() & 1));
1161 if (!((rt | rn) & 8) && imm.isUInt6())
1162 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
1164 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1167 // If index is set, this is a regular offset or a pre-indexed load;
1168 // if index is not set then is is a post-index load.
1170 // If wback is set rn is updated - this is a pre or post index load,
1171 // if wback is not set this is a regular offset memory access.
1173 // (-255 <= offset <= 255)
1175 // _tmp = _reg + offset
1176 // MEM[index ? _tmp : _reg] = REG[rt]
1177 // if (wback) REG[rn] = _tmp
// Indexed/writeback halfword load (T3 form); same W/U/P bit packing as the
// word-sized ldr above.
// NOTE(review): the 'bool add' computation for negative offsets appears
// elided in this extract.
1178 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1180 ASSERT(rt != ARMRegisters::pc);
1181 ASSERT(rn != ARMRegisters::pc);
1182 ASSERT(index || wback);
1183 ASSERT(!wback | (rt != rn));
1190 ASSERT((offset & ~0xff) == 0);
1192 offset |= (wback << 8);
1193 offset |= (add << 9);
1194 offset |= (index << 10);
1195 offset |= (1 << 11);
1197 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
// Halfword load with register offset.
1200 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1202 ASSERT(!BadReg(rt)); // Memory hint
1203 ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1204 ASSERT(!BadReg(rm));
1207 if (!shift && !((rt | rn | rm) & 8))
1208 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1210 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Byte load with immediate offset: 16-bit T1 for low registers and 5-bit
// offsets (byte-sized, so no scaling), else 32-bit T2.
// NOTE(review): braces elided throughout this extract.
1213 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1215 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1216 ASSERT(imm.isUInt12());
1218 if (!((rt | rn) & 8) && imm.isUInt5())
1219 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1221 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
// Indexed/writeback byte load (T3 form); same W/U/P bit packing as ldr.
// NOTE(review): the 'bool add' computation for negative offsets appears
// elided in this extract.
1224 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1226 ASSERT(rt != ARMRegisters::pc);
1227 ASSERT(rn != ARMRegisters::pc);
1228 ASSERT(index || wback);
1229 ASSERT(!wback | (rt != rn));
1237 ASSERT(!(offset & ~0xff));
1239 offset |= (wback << 8);
1240 offset |= (add << 9);
1241 offset |= (index << 10);
1242 offset |= (1 << 11);
1244 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
// Byte load with register offset.
1247 ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1249 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1250 ASSERT(!BadReg(rm));
1253 if (!shift && !((rt | rn | rm) & 8))
1254 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1256 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Sign-extending byte load with register offset.
1259 void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1261 ASSERT(rn != ARMRegisters::pc);
1262 ASSERT(!BadReg(rm));
1265 if (!shift && !((rt | rn | rm) & 8))
1266 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
1268 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Sign-extending halfword load with register offset.
1271 void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1273 ASSERT(rn != ARMRegisters::pc);
1274 ASSERT(!BadReg(rm));
1277 if (!shift && !((rt | rn | rm) & 8))
1278 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
1280 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Logical shift left by immediate (MOV-shifted-register encoding).
// NOTE(review): braces elided throughout this extract.
1283 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1285 ASSERT(!BadReg(rd));
1286 ASSERT(!BadReg(rm));
1287 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1288 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Logical shift left by register amount.
1291 ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1293 ASSERT(!BadReg(rd));
1294 ASSERT(!BadReg(rn));
1295 ASSERT(!BadReg(rm));
1296 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Logical shift right by immediate.
1299 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1301 ASSERT(!BadReg(rd));
1302 ASSERT(!BadReg(rm));
1303 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1304 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Logical shift right by register amount.
1307 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1309 ASSERT(!BadReg(rd));
1310 ASSERT(!BadReg(rn));
1311 ASSERT(!BadReg(rm));
1312 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// MOVW (T3): loads a raw 16-bit immediate into rd.  Used (with MOVT) for
// patchable 32-bit constant loads; callers must not pass an immediate that
// would also fit the shorter encoded-imm forms (hence the !isEncodedImm()).
1315 ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1317 ASSERT(imm.isValid());
1318 ASSERT(!imm.isEncodedImm());
1319 ASSERT(!BadReg(rd));
1321 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
// Rewrites 5 halfwords at instructionStart into the fixed sequence
// movw right, #lo16(imm); movt right, #hi16(imm); cmp left, right —
// reverting a previously patched jump back to a compare sequence.
1325 static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
1327 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1328 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
1329 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
1330 uint16_t instruction[] = {
1331 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
1332 twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
1333 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
1334 twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
1335 static_cast<uint16_t>(OP_CMP_reg_T2 | left)
// Write through the JIT-memory copier (handles W^X), then flush icache.
1337 performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
1338 cacheFlush(address, sizeof(uint16_t) * 5);
// Rewrites 2 halfwords at instructionStart back into a movw rd, #imm (T3).
1341 static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
1343 ASSERT(imm.isValid());
1344 ASSERT(!imm.isEncodedImm());
1345 ASSERT(!BadReg(rd));
1347 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1348 uint16_t instruction[] = {
1349 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
1350 twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
1352 performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
1353 cacheFlush(address, sizeof(uint16_t) * 2);
// MOV (immediate): chooses the smallest encoding — 16-bit T1 for low
// registers with an 8-bit immediate, 32-bit T2 for Thumb encoded-imms.
// NOTE(review): the fallback branch (movT3 for plain 16-bit immediates) is
// on lines elided from this view — confirm against the full file.
1357 ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1359 ASSERT(imm.isValid());
1360 ASSERT(!BadReg(rd));
1362 if ((rd < 8) && imm.isUInt8())
1363 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1364 else if (imm.isEncodedImm())
1365 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
// MOV (register), 16-bit T1 encoding; works for high registers too.
1370 ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
1372 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
// MOVT: writes a 16-bit immediate into the top half of rd, leaving the
// bottom half unchanged.  Pairs with movT3 for full 32-bit constants.
1375 ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
1377 ASSERT(imm.isUInt16());
1378 ASSERT(!BadReg(rd));
1379 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
// MVN (immediate): rd = ~imm, requires a Thumb encoded immediate.
1382 ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
1384 ASSERT(imm.isEncodedImm());
1385 ASSERT(!BadReg(rd));
1387 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
// MVN (register, shifted): rd = ~(rm shifted), 32-bit T2 encoding.
1390 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1392 ASSERT(!BadReg(rd));
1393 ASSERT(!BadReg(rm));
1394 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// MVN (register): 16-bit T1 when both registers are low, else the shifted
// T2 form with a default (no-op) shift.
1397 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1399 if (!((rd | rm) & 8))
1400 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1402 mvn(rd, rm, ShiftTypeAndAmount());
// NEG: rd = -rm, implemented as a flag-setting reverse-subtract from zero.
// NOTE(review): the call that performs the subtraction (presumably
// sub_S(rd, zero, rm)) is on a line elided from this view.
1405 ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1407 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
// ORR (immediate): rd = rn | imm, requires a Thumb encoded immediate.
1411 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1413 ASSERT(!BadReg(rd));
1414 ASSERT(!BadReg(rn));
1415 ASSERT(imm.isEncodedImm());
1416 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
// ORR (register, shifted): 32-bit T2 encoding.
1419 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1421 ASSERT(!BadReg(rd));
1422 ASSERT(!BadReg(rn));
1423 ASSERT(!BadReg(rm));
1424 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// ORR (register): the 16-bit T1 form requires rd to equal one source
// operand and all registers low; OR is commutative so either operand
// order is accepted.  Otherwise falls back to the shifted T2 form.
1427 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1429 if ((rd == rn) && !((rd | rm) & 8))
1430 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1431 else if ((rd == rm) && !((rd | rn) & 8))
1432 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1434 orr(rd, rn, rm, ShiftTypeAndAmount());
// ORRS (register, shifted): flag-setting variant, 32-bit T2 encoding.
1437 ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1439 ASSERT(!BadReg(rd));
1440 ASSERT(!BadReg(rn));
1441 ASSERT(!BadReg(rm));
1442 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// ORRS (register): same encoding selection as orr(); the 16-bit T1 form
// sets flags outside an IT block, which is what _S callers want.
1445 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1447 if ((rd == rn) && !((rd | rm) & 8))
1448 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1449 else if ((rd == rm) && !((rd | rn) & 8))
1450 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1452 orr_S(rd, rn, rm, ShiftTypeAndAmount());
// ROR (immediate): rotate right by a constant.
1455 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1457 ASSERT(!BadReg(rd));
1458 ASSERT(!BadReg(rm));
1459 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1460 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// ROR (register): rd = rn rotated right by rm, 32-bit T2 encoding.
1463 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1465 ASSERT(!BadReg(rd));
1466 ASSERT(!BadReg(rn));
1467 ASSERT(!BadReg(rm));
1468 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// POP (single register): 16-bit T1 encoding for low registers; otherwise
// an equivalent post-indexed LDR from SP with writeback.
1471 ALWAYS_INLINE void pop(RegisterID dest)
1473 if (dest < ARMRegisters::r8)
1474 m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
1476 // Load postindexed with writeback.
1477 ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
// POP (register list): 32-bit T2 encoding.  PC and LR may not both be in
// the list, and SP may never be.
1481 ALWAYS_INLINE void pop(uint32_t registerList)
1483 ASSERT(WTF::bitCount(registerList) > 1);
1484 ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
1485 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1486 m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
// PUSH (single register): 16-bit T1 for low registers and LR (bit 8 of the
// T1 register list encodes LR); otherwise a pre-indexed STR to SP with
// writeback.
1489 ALWAYS_INLINE void push(RegisterID src)
1491 if (src < ARMRegisters::r8)
1492 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
1493 else if (src == ARMRegisters::lr)
1494 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
1496 // Store preindexed with writeback.
1497 str(src, ARMRegisters::sp, -sizeof(void*), true, true);
// PUSH (register list): 32-bit T2 encoding; PC and SP are not pushable.
1501 ALWAYS_INLINE void push(uint32_t registerList)
1503 ASSERT(WTF::bitCount(registerList) > 1);
1504 ASSERT(!((1 << ARMRegisters::pc) & registerList));
1505 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1506 m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
1509 #if HAVE(ARM_IDIV_INSTRUCTIONS)
// SDIV: signed integer divide (only available on cores with the hardware
// divide extension, e.g. ARMv7-M/armv7s — hence the HAVE() guard).
// Templated on datasize for API symmetry with 64-bit back ends; only 32
// is valid here.
1510 template<int datasize>
1511 ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
1513 static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
1514 ASSERT(!BadReg(rd));
1515 ASSERT(!BadReg(rn));
1516 ASSERT(!BadReg(rm));
1517 m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
// SMULL: signed 32x32 -> 64 multiply, low half in rdLo, high half in rdHi.
// The destination registers must be distinct.
1521 ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1523 ASSERT(!BadReg(rdLo));
1524 ASSERT(!BadReg(rdHi));
1525 ASSERT(!BadReg(rn));
1526 ASSERT(!BadReg(rm));
1527 ASSERT(rdLo != rdHi);
1528 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1531 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STR (immediate): chooses the shortest encoding — 16-bit T1 (low regs,
// word-aligned 7-bit offset), 16-bit T2 (SP-relative, 10-bit offset),
// otherwise the 32-bit T3 form with a full 12-bit offset.
1532 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1534 ASSERT(rt != ARMRegisters::pc);
1535 ASSERT(rn != ARMRegisters::pc);
1536 ASSERT(imm.isUInt12());
1538 if (!((rt | rn) & 8) && imm.isUInt7())
1539 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1540 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1541 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1543 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1546 // If index is set, this is a regular offset or a pre-indexed store;
1547 // if index is not set then it is a post-index store.
1549 // If wback is set rn is updated - this is a pre or post index store,
1550 // if wback is not set this is a regular offset memory access.
1552 // (-255 <= offset <= 255)
1554 // _tmp = _reg + offset
1555 // MEM[index ? _tmp : _reg] = REG[rt]
1556 // if (wback) REG[rn] = _tmp
// NOTE(review): the 'add' flag below is derived from the sign of 'offset'
// on lines elided from this view — confirm against the full file.
1557 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1559 ASSERT(rt != ARMRegisters::pc);
1560 ASSERT(rn != ARMRegisters::pc);
1561 ASSERT(index || wback);
// Bitwise '|' on boolean operands — equivalent to '||' here.
1562 ASSERT(!wback | (rt != rn));
1569 ASSERT((offset & ~0xff) == 0);
// Pack W/U/P control bits and the fixed bit 11 into the imm12 field (T4).
1571 offset |= (wback << 8);
1572 offset |= (add << 9);
1573 offset |= (index << 10);
1574 offset |= (1 << 11);
1576 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1579 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STR (register): 16-bit T1 when all registers are low and no shift,
// otherwise the 32-bit T2 encoding with an optional left-shift of rm.
1580 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1582 ASSERT(rn != ARMRegisters::pc);
1583 ASSERT(!BadReg(rm));
1586 if (!shift && !((rt | rn | rm) & 8))
1587 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1589 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1592 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STRB (immediate): 16-bit T1 for low registers with a 7-bit offset
// (byte stores are not scaled — see NOTE below), else 32-bit T2.
// NOTE(review): getUInt7() >> 2 mirrors the word-store path; for STRB the
// T1 imm5 is an unscaled byte offset, so verify this shift against the
// full file / ARM ARM — it looks suspicious but is kept byte-identical.
1593 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1595 ASSERT(rt != ARMRegisters::pc);
1596 ASSERT(rn != ARMRegisters::pc);
1597 ASSERT(imm.isUInt12());
1599 if (!((rt | rn) & 8) && imm.isUInt7())
1600 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1602 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1605 // If index is set, this is a regular offset or a pre-indexed store;
1606 // if index is not set then it is a post-index store.
1608 // If wback is set rn is updated - this is a pre or post index store,
1609 // if wback is not set this is a regular offset memory access.
1611 // (-255 <= offset <= 255)
1613 // _tmp = _reg + offset
1614 // MEM[index ? _tmp : _reg] = REG[rt]
1615 // if (wback) REG[rn] = _tmp
// NOTE(review): 'add' is derived from the sign of 'offset' on elided lines.
1616 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1618 ASSERT(rt != ARMRegisters::pc);
1619 ASSERT(rn != ARMRegisters::pc);
1620 ASSERT(index || wback);
// Bitwise '|' on boolean operands — equivalent to '||' here.
1621 ASSERT(!wback | (rt != rn));
1628 ASSERT((offset & ~0xff) == 0);
// Pack W/U/P control bits and the fixed bit 11 into the imm12 field (T3).
1630 offset |= (wback << 8);
1631 offset |= (add << 9);
1632 offset |= (index << 10);
1633 offset |= (1 << 11);
1635 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1638 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STRB (register): 16-bit T1 when all registers are low and no shift,
// otherwise 32-bit T2 with an optional left-shift of rm.
1639 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1641 ASSERT(rn != ARMRegisters::pc);
1642 ASSERT(!BadReg(rm));
1645 if (!shift && !((rt | rn | rm) & 8))
1646 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
1648 m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1651 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STRH (immediate): 16-bit T1 for low registers with a halfword-aligned
// 6-bit offset (imm5 scaled by 2), otherwise 32-bit T2.
1652 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1654 ASSERT(rt != ARMRegisters::pc);
1655 ASSERT(rn != ARMRegisters::pc);
1656 ASSERT(imm.isUInt12());
1658 if (!((rt | rn) & 8) && imm.isUInt6())
1659 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
1661 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
1664 // If index is set, this is a regular offset or a pre-indexed store;
1665 // if index is not set then it is a post-index store.
1667 // If wback is set rn is updated - this is a pre or post index store,
1668 // if wback is not set this is a regular offset memory access.
1670 // (-255 <= offset <= 255)
1672 // _tmp = _reg + offset
1673 // MEM[index ? _tmp : _reg] = REG[rt]
1674 // if (wback) REG[rn] = _tmp
// NOTE(review): 'add' is derived from the sign of 'offset' on elided lines.
1675 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1677 ASSERT(rt != ARMRegisters::pc);
1678 ASSERT(rn != ARMRegisters::pc);
1679 ASSERT(index || wback);
// Bitwise '|' on boolean operands — equivalent to '||' here.
1680 ASSERT(!wback | (rt != rn));
1687 ASSERT(!(offset & ~0xff));
// Pack W/U/P control bits and the fixed bit 11 into the imm12 field (T3).
1689 offset |= (wback << 8);
1690 offset |= (add << 9);
1691 offset |= (index << 10);
1692 offset |= (1 << 11);
1694 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1697 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// STRH (register): 16-bit T1 when all registers are low and no shift,
// otherwise 32-bit T2 with an optional left-shift of rm.
1698 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1700 ASSERT(rn != ARMRegisters::pc);
1701 ASSERT(!BadReg(rm));
1704 if (!shift && !((rt | rn | rm) & 8))
1705 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
1707 m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// SUB (immediate): smallest-encoding selection — SP-adjust T1 (word-scaled
// 9-bit imm), low-register T1 (3-bit imm) / T2 (rd==rn, 8-bit imm), then
// the 32-bit T3 (encoded imm) or T4 (plain 12-bit imm) forms.
1710 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1712 // Rd can only be SP if Rn is also SP.
1713 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1714 ASSERT(rd != ARMRegisters::pc);
1715 ASSERT(rn != ARMRegisters::pc);
1716 ASSERT(imm.isValid());
1718 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
// SP adjustments must be word-aligned for the T1 encoding.
1719 ASSERT(!(imm.getUInt16() & 3));
1720 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1722 } else if (!((rd | rn) & 8)) {
1723 if (imm.isUInt3()) {
1724 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1726 } else if ((rd == rn) && imm.isUInt8()) {
1727 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1732 if (imm.isEncodedImm())
1733 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1735 ASSERT(imm.isUInt12());
1736 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
// RSB (immediate): rd = imm - rn.  The argument order (imm before rn)
// distinguishes this overload from plain sub.  16-bit T1 only encodes
// "negate" (imm == 0) with low registers.
1740 ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1742 ASSERT(rd != ARMRegisters::pc);
1743 ASSERT(rn != ARMRegisters::pc);
1744 ASSERT(imm.isValid());
1745 ASSERT(imm.isUInt12());
1747 if (!((rd | rn) & 8) && !imm.getUInt12())
1748 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1750 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
// SUB (register, shifted): 32-bit T2 encoding.
1753 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1755 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1756 ASSERT(rd != ARMRegisters::pc);
1757 ASSERT(rn != ARMRegisters::pc);
1758 ASSERT(!BadReg(rm));
1759 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1762 // NOTE: In an IT block, add doesn't modify the flags register.
// SUB (register): 16-bit T1 for all-low registers, else shifted T2.
1763 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1765 if (!((rd | rn | rm) & 8))
1766 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1768 sub(rd, rn, rm, ShiftTypeAndAmount());
1771 // Not allowed in an IT (if then) block.
// SUBS (immediate): flag-setting variant; mirrors sub()'s encoding
// selection (the 16-bit forms set flags when used outside an IT block),
// with the 32-bit fallback using the S-suffixed T3 encoding.
1772 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1774 // Rd can only be SP if Rn is also SP.
1775 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1776 ASSERT(rd != ARMRegisters::pc);
1777 ASSERT(rn != ARMRegisters::pc);
1778 ASSERT(imm.isValid());
1780 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1781 ASSERT(!(imm.getUInt16() & 3));
1782 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1784 } else if (!((rd | rn) & 8)) {
1785 if (imm.isUInt3()) {
1786 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1788 } else if ((rd == rn) && imm.isUInt8()) {
1789 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1794 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
// RSBS (immediate): flag-setting reverse subtract, rd = imm - rn.
1797 ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1799 ASSERT(rd != ARMRegisters::pc);
1800 ASSERT(rn != ARMRegisters::pc);
1801 ASSERT(imm.isValid());
1802 ASSERT(imm.isUInt12());
1804 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
1807 // Not allowed in an IT (if then) block?
// SUBS (register, shifted): 32-bit T2 encoding.
1808 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1810 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1811 ASSERT(rd != ARMRegisters::pc);
1812 ASSERT(rn != ARMRegisters::pc);
1813 ASSERT(!BadReg(rm));
1814 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1817 // Not allowed in an IT (if then) block.
// SUBS (register): 16-bit T1 for all-low registers, else shifted T2.
1818 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1820 if (!((rd | rn | rm) & 8))
1821 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1823 sub_S(rd, rn, rm, ShiftTypeAndAmount())
// TST (immediate): sets flags from rn & imm; no destination register
// (the Rd field of the encoding is fixed to 0xf).
1826 ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
1828 ASSERT(!BadReg(rn));
1829 ASSERT(imm.isEncodedImm());
1831 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
// TST (register, shifted): 32-bit T2 encoding.
1834 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1836 ASSERT(!BadReg(rn));
1837 ASSERT(!BadReg(rm));
1838 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
// TST (register): shifted T2 form or the 16-bit T1 form for low registers.
// NOTE(review): the condition selecting between the two paths is on a line
// elided from this view (internal line 1843) — confirm against the full file.
1841 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1844 tst(rn, rm, ShiftTypeAndAmount());
1846 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
// UBFX: unsigned bit-field extract — rd = (rn >> lsb) & ((1 << width) - 1).
// lsb is split into imm3/imm2 fields; width is encoded as width - 1.
1849 ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
1852 ASSERT((width >= 1) && (width <= 32));
1853 ASSERT((lsb + width) <= 32);
1854 m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
1857 #if HAVE(ARM_IDIV_INSTRUCTIONS)
// UDIV: unsigned integer divide (hardware-divide extension only).
1858 ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
1860 ASSERT(!BadReg(rd));
1861 ASSERT(!BadReg(rn));
1862 ASSERT(!BadReg(rm));
1863 m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
// ----- VFP (floating point) instructions -----
// All of these forward to ARMv7Assembler::m_formatter's vfpOp/vfpMemOp;
// the bool argument selects the 64-bit (double) vs 32-bit (single) form.
// VADD.F64 rd = rn + rm
1867 void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1869 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
// VCMP.F64: compare rd with rm, result goes to FPSCR (read back via vmrs).
1872 void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1874 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
// VCMP.F64 against +0.0.
1877 void vcmpz(FPDoubleRegisterID rd)
1879 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
// VCVT: signed int (in a single register) -> double.
1882 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1884 // boolean values are 64bit (toInt, unsigned, roundZero)
1885 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
// VCVT: double -> signed int, rounding toward zero.
1888 void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1890 // boolean values are 64bit (toInt, unsigned, roundZero)
1891 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
// VCVT: double -> unsigned int, rounding toward zero.
1894 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1896 // boolean values are 64bit (toInt, unsigned, roundZero)
1897 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
// VDIV.F64 rd = rn / rm
1900 void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1902 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
// VLDR: load a double from [rn + imm].
1905 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1907 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
// FLDS: load a single from [rn + imm].
1910 void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1912 m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
// VMOV: single FP register -> core register.
1915 void vmov(RegisterID rd, FPSingleRegisterID rn)
1917 ASSERT(!BadReg(rd));
1918 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
// VMOV: core register -> single FP register.
1921 void vmov(FPSingleRegisterID rd, RegisterID rn)
1923 ASSERT(!BadReg(rn));
1924 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
// VMOV: double FP register -> core register pair (rd1 = low, rd2 = high).
1927 void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
1929 ASSERT(!BadReg(rd1));
1930 ASSERT(!BadReg(rd2));
1931 m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
// VMOV: core register pair -> double FP register.
1934 void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
1936 ASSERT(!BadReg(rn1));
1937 ASSERT(!BadReg(rn2));
1938 m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
// VMOV.F64: double register to double register copy.
1941 void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
1943 m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
// VMRS: copy FPSCR to a core register; reg == pc means the APSR flags
// are updated directly (the usual way to branch on an FP compare).
1946 void vmrs(RegisterID reg = ARMRegisters::pc)
1948 ASSERT(reg != ARMRegisters::sp);
1949 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
// VMUL.F64 rd = rn * rm
1952 void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1954 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
// VSTR: store a double to [rn + imm].
1957 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1959 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
// FSTS: store a single to [rn + imm].
1962 void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1964 m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
// VSUB.F64 rd = rn - rm
1967 void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1969 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
// VABS.F64 rd = |rm|
1972 void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1974 m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
// VNEG.F64 rd = -rm
1977 void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1979 m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
// VSQRT.F64 rd = sqrt(rm)
1982 void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1984 m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
// VCVT.F64.F32: widen single to double.
1987 void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1989 m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
// VCVT.F32.F64: narrow double to single.
1992 void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1994 m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
1999 m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
2004 m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
// Encodings of the 16-bit and 32-bit Thumb NOPs, as raw values for bulk
// writes (see fillNops).
// NOTE(review): nopPseudo16's return statement is on a line elided from
// this view — confirm it returns OP_NOP_T1 in the full file.
2007 static constexpr int16_t nopPseudo16()
2012 static constexpr int32_t nopPseudo32()
2014 return OP_NOP_T2a | (OP_NOP_T2b << 16);
// Fills [base, base + size) with NOPs: as many 4-byte NOPs as fit, then at
// most one trailing 2-byte NOP.  size must be halfword-aligned.  Writes go
// through performJITMemcpy when targeting executable memory (W^X), plain
// memcpy otherwise.
2017 static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
2019 RELEASE_ASSERT(!(size % sizeof(int16_t)));
2021 char* ptr = static_cast<char*>(base);
2022 const size_t num32s = size / sizeof(int32_t);
2023 for (size_t i = 0; i < num32s; i++) {
2024 const int32_t insn = nopPseudo32();
2025 if (isCopyingToExecutableMemory)
2026 performJITMemcpy(ptr, &insn, sizeof(int32_t));
2028 memcpy(ptr, &insn, sizeof(int32_t));
2029 ptr += sizeof(int32_t);
// The remainder after 4-byte NOPs is either 0 or exactly one halfword.
2032 const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
2033 ASSERT(num16s == 0 || num16s == 1);
2034 ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
2036 const int16_t insn = nopPseudo16();
2037 if (isCopyingToExecutableMemory)
2038 performJITMemcpy(ptr, &insn, sizeof(int16_t));
2040 memcpy(ptr, &insn, sizeof(int16_t));
2046 m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
// Returns the current write position without any watchpoint padding logic.
2049 AssemblerLabel labelIgnoringWatchpoints()
2051 return m_formatter.label();
// Returns a label for a watchpoint site and records the region
// [offset, offset + maxJumpReplacementSize()) so later labels can be
// pushed past it (a watchpoint may be overwritten with a full jump).
2054 AssemblerLabel labelForWatchpoint()
2056 AssemblerLabel result = m_formatter.label();
2057 if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
2059 m_indexOfLastWatchpoint = result.m_offset;
2060 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
// Returns the current label, emitting NOPs as needed so the label does not
// fall inside the tail of the last watchpoint.
// NOTE(review): the nop-emitting statements inside the loop are on lines
// elided from this view — confirm against the full file.
2064 AssemblerLabel label()
2066 AssemblerLabel result = m_formatter.label();
2067 while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
2068 if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
2072 result = m_formatter.label();
// Pads with NOPs until the buffer offset is a multiple of 'alignment',
// then returns a label at that position (return is on an elided line).
2077 AssemblerLabel align(int alignment)
2079 while (!m_formatter.isAligned(alignment))
// Maps a buffer-relative label to its address in relocated/executable code.
2085 static void* getRelocatedAddress(void* code, AssemblerLabel label)
2087 ASSERT(label.isSet());
2088 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
// Byte distance between two labels (b - a).
2091 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
2093 return b.m_offset - a.m_offset;
// Bytes saved by compacting a padded jump slot down to the chosen link type.
2096 static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
2098 // Assembler admin methods:
// Orders link records by source offset, for branch-compaction passes.
2100 static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
2102 return a.from() < b.from();
2105 static bool canCompact(JumpType jumpType)
2107 // The following cannot be compacted:
2108 // JumpFixed: represents custom jump sequence
2109 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2110 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2111 return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
// Picks the shortest branch encoding that can reach 'to' from 'from',
// assuming the jump was emitted with maximum padding (JUMP_ENUM_SIZE of
// the jump type) and the instruction is placed at the END of that pad.
// NOTE(review): several 'return LinkJumpTn;' lines following the canBe*
// checks are elided from this view — confirm against the full file.
2114 static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
2116 if (jumpType == JumpFixed)
2119 // for patchable jump we must leave space for the longest code sequence
2120 if (jumpType == JumpNoConditionFixedSize)
2122 if (jumpType == JumpConditionFixedSize)
2123 return LinkConditionalBX;
2125 const int paddingSize = JUMP_ENUM_SIZE(jumpType);
2127 if (jumpType == JumpCondition) {
2128 // 2-byte conditional T1
2129 const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
2130 if (canBeJumpT1(jumpT1Location, to))
2132 // 4-byte conditional T3
2133 const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
2134 if (canBeJumpT3(jumpT3Location, to))
2136 // 4-byte conditional T4 with IT
2137 const uint16_t* conditionalJumpT4Location =
2138 reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
2139 if (canBeJumpT4(conditionalJumpT4Location, to))
2140 return LinkConditionalJumpT4;
2142 // 2-byte unconditional T2
2143 const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
2144 if (canBeJumpT2(jumpT2Location, to))
2146 // 4-byte unconditional T4
2147 const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
2148 if (canBeJumpT4(jumpT4Location, to))
2150 // use long jump sequence
// Out-of-range conditional jumps fall back to the full BX sequence.
2154 ASSERT(jumpType == JumpCondition);
2155 return LinkConditionalBX;
// Convenience overload: computes the link type and caches it on the record.
2158 static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
2160 JumpLinkType linkType = computeJumpType(record.type(), from, to);
2161 record.setLinkType(linkType);
// Returns pending jumps sorted by source offset (sort happens lazily here).
2165 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
2167 std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
2168 return m_jumpsToLink;
// Writes the branch encoding chosen by computeJumpType.  'from' is the
// writable address of the jump, 'fromInstruction8' the corresponding
// executable address (used for PC-relative displacement), 'to' the target.
// NOTE(review): the case labels for the first four linkJumpT* calls and the
// intervening 'break;'s are on lines elided from this view.
2171 static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
2173 const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
2174 switch (record.linkType()) {
2176 linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2179 linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2182 linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2185 linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2187 case LinkConditionalJumpT4:
2188 linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2190 case LinkConditionalBX:
2191 linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2194 linkBX(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2197 RELEASE_ASSERT_NOT_REACHED();
// Raw access to the assembled (not yet relocated) buffer and its size.
2202 void* unlinkedCode() { return m_formatter.data(); }
2203 size_t codeSize() const { return m_formatter.codeSize(); }
// The return address of a call is the label recorded at the call site.
2205 static unsigned getCallReturnOffset(AssemblerLabel call)
2207 ASSERT(call.isSet());
2208 return call.m_offset;
2211 // Linking & patching:
2213 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2214 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2215 // code has been finalized it is (platform support permitting) within a non-
2216 writable region of memory; to modify the code in an execute-only executable
2217 // pool the 'repatch' and 'relink' methods should be used.
// Records an intra-buffer jump to be resolved later by the compaction /
// link pass (see jumpsToLink() and link()).
2219 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2222 ASSERT(from.isSet());
2223 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
// Immediately links a jump in not-yet-protected code at 'code' + offset.
2226 static void linkJump(void* code, AssemblerLabel from, void* to)
2228 ASSERT(from.isSet());
2230 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2231 linkJumpAbsolute(location, location, to);
// Links a call by patching the movw/movt pair that loads the callee
// address; the pair ends one halfword before the recorded label.
2234 static void linkCall(void* code, AssemblerLabel from, void* to)
2236 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2237 ASSERT(from.isSet());
2239 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
// Patches an absolute pointer constant at 'where' (no cache flush: the
// final 'false' argument; code is not yet executable at this point).
2242 static void linkPointer(void* code, AssemblerLabel where, void* value)
2244 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
2247 // The static relink and replace methods can use |from| for both
2248 // the write and executable address for call and jump patching
2249 // as they're modifying existing (linked) code, so the address being
2250 // provided is correct for relative address computation.
// Re-points an already-linked jump at 'from' to a new target, then flushes
// the icache over the (up to) 5-halfword jump sequence ending at 'from'.
2251 static void relinkJump(void* from, void* to)
2253 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2254 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
2256 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);
2258 cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
// Re-points an already-linked call: rewrites the movw/movt constant ending
// one halfword before 'from' (the 'true' flag requests a cache flush).
2261 static void relinkCall(void* from, void* to)
2263 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2265 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
// Reads back the callee address from the movw/movt pair before 'from'.
2268 static void* readCallTarget(void* from)
2270 return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
// Rewrites a 32-bit constant embedded in a movw/movt pair at 'where'.
2273 static void repatchInt32(void* where, int32_t value)
2275 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2277 setInt32(where, value, true);
// Patches the offset of a compact (T4-style) load at 'where'.  Rebuilds
// the imm12 field: U (add) bit from the offset sign, P=1, fixed bit 11.
// NOTE(review): the lines converting a negative 'offset' to a positive
// magnitude and setting 'add' are elided from this view — confirm.
2280 static void repatchCompact(void* where, int32_t offset)
2282 ASSERT(offset >= -255 && offset <= 255);
2290 offset |= (add << 9);
2291 offset |= (1 << 10);
2292 offset |= (1 << 11);
// Only the second halfword of the 32-bit instruction holds the imm12.
2294 uint16_t* location = reinterpret_cast<uint16_t*>(where);
2295 uint16_t instruction = location[1] & ~((1 << 12) - 1);
2296 instruction |= offset;
2297 performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
2298 cacheFlush(location, sizeof(uint16_t) * 2);
// Rewrites an embedded pointer constant in executable memory (with flush).
2301 static void repatchPointer(void* where, void* value)
2303 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2305 setPointer(where, value, true);
// Reads back a pointer-sized constant from a movw/movt pair.
2308 static void* readPointer(void* where)
2310 return reinterpret_cast<void*>(readInt32(where));
// Overwrites the instruction(s) at instructionStart with a jump to 'to'.
// Uses a 4-byte T4 branch when the displacement fits, otherwise the long
// movw/movt/bx sequence (5 halfwords).  The link helpers take the address
// AFTER the instruction, hence the +2/+5 pointer adjustment before the
// matching negative-offset cacheFlush.
// NOTE(review): the #if/#else structure separating the two strategies is
// partially elided from this view — confirm branch layout in the full file.
2313 static void replaceWithJump(void* instructionStart, void* to)
2315 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2316 ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2319 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
2320 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2321 linkJumpT4(ptr, ptr, to);
2322 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2324 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
2325 linkBX(ptr, ptr, to);
2326 cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
2329 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2330 linkJumpT4(ptr, ptr, to);
2331 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2335 static ptrdiff_t maxJumpReplacementSize()
2344 static constexpr ptrdiff_t patchableJumpSize()
// Turns an ADD (immediate, T3) at 'instructionStart' into the corresponding
// LDR (immediate, T3), keeping the same registers and 12-bit immediate.
// NOTE(review): additional switch cases (e.g. already-a-load) are elided
// from this excerpt.
2349 static void replaceWithLoad(void* instructionStart)
2351 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2352 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
// Mask off the low 4 bits (Rn field) to compare just the opcode.
2353 switch (ptr[0] & 0xFFF0) {
2356 case OP_ADD_imm_T3: {
// ADD-T3's second halfword must have its top nibble clear.
2357 ASSERT(!(ptr[1] & 0xF000));
2358 uint16_t instructions[2];
// Keep Rn from the first halfword, swap in the LDR opcode.
2359 instructions[0] = ptr[0] & 0x000F;
2360 instructions[0] |= OP_LDR_imm_T3;
// Move the destination-register nibble from bits 8-11 to bits 12-15, the
// position LDR-T3 uses for Rt.
2361 instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
2362 instructions[1] &= 0xF0FF;
2363 performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
2364 cacheFlush(ptr, sizeof(uint16_t) * 2);
2368 RELEASE_ASSERT_NOT_REACHED();
// Inverse of replaceWithLoad: turns an LDR (immediate, T3) back into the
// corresponding ADD (immediate, T3), preserving registers and immediate.
// NOTE(review): additional switch cases are elided from this excerpt.
2372 static void replaceWithAddressComputation(void* instructionStart)
2374 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2375 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2376 switch (ptr[0] & 0xFFF0) {
2377 case OP_LDR_imm_T3: {
// ADD-T3 requires bits 8-11 of the second halfword to be clear.
2378 ASSERT(!(ptr[1] & 0x0F00));
2379 uint16_t instructions[2];
// Keep Rn, swap in the ADD opcode.
2380 instructions[0] = ptr[0] & 0x000F;
2381 instructions[0] |= OP_ADD_imm_T3;
// Move the destination-register nibble from bits 12-15 down to bits 8-11,
// the position ADD-T3 uses for Rd.
2382 instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
2383 instructions[1] &= 0x0FFF;
2384 performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
2385 cacheFlush(ptr, sizeof(uint16_t) * 2);
2391 RELEASE_ASSERT_NOT_REACHED();
// Debug aid: current offset within the underlying assembler buffer.
2395 unsigned debugOffset() { return m_formatter.debugOffset(); }
// Flushes the instruction cache for [begin, end) via inline assembly.
// The asm body is elided from this excerpt; the visible constraint lists
// show it takes 'begin' and 'end' as inputs and clobbers r0-r2 (consistent
// with marshalling arguments for the Linux cacheflush syscall — confirm
// against the full source).
2398 static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
2410 : "r" (begin), "r" (end)
2411 : "r0", "r1", "r2");
// Makes freshly written JIT code executable by flushing the instruction
// cache for [code, code + size). Platform-specific; the #if/#elif lines
// selecting the Darwin, Linux, and unsupported paths are elided from this
// excerpt.
2415 static void cacheFlush(void* code, size_t size)
// Darwin path: single libSystem call.
2418 sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
// Linux path: flush one page at a time rather than the whole range in a
// single syscall.
2420 size_t page = pageSize();
2421 uintptr_t current = reinterpret_cast<uintptr_t>(code);
2422 uintptr_t end = current + size;
// Round 'current' up to the end of its containing page.
2423 uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
2425 if (end <= firstPageEnd) {
2426 linuxPageFlush(current, end);
// Range spans multiple pages: flush the head, then whole pages, then the tail.
2430 linuxPageFlush(current, firstPageEnd);
2432 for (current = firstPageEnd; current + page < end; current += page)
2433 linuxPageFlush(current, current + page);
2435 linuxPageFlush(current, end);
2437 #error "The cacheFlush support is missing on this platform."
2442 // VFP operations commonly take one or more 5-bit operands, typically representing a
2443 // floating point register number. This will commonly be encoded in the instruction
2444 // in two parts, with one single bit field, and one 4-bit field. In the case of
2445 // double precision operands the high bit of the register number will be encoded
2446 // separately, and for single precision operands the low bit of the register number
2447 // will be encoded individually.
2448 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2449 // field to be encoded together in the instruction (the low 4-bits of a double
2450 // register number, or the high 4-bits of a single register number), and bit 4
2451 // contains the bit value to be encoded individually.
// Construct from a raw 5-bit value; anything wider is a programming error.
2453 explicit VFPOperand(uint32_t value)
2456 ASSERT(!(m_value & ~0x1f));
// Double-precision registers use their number unchanged (low 4 bits + high bit).
2459 VFPOperand(FPDoubleRegisterID reg)
// Core registers are also accepted where the encoding calls for one.
2464 VFPOperand(RegisterID reg)
2469 VFPOperand(FPSingleRegisterID reg)
2470 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
// Accessor for the separately encoded bit (bit 4).
2476 return m_value >> 4;
// Accessor for the 4-bit field (bits 0..3).
2481 return m_value & 0xf;
// Builds the 5-bit operand selecting the form of a VCVT instruction from the
// direction (to/from integer), signedness, and rounding mode.
// NOTE(review): the initialization of 'op' and the branch structure are
// elided from this excerpt — confirm against the full source.
2487 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
2489 // Cannot specify rounding when converting to float.
2490 ASSERT(toInteger || !isRoundZero);
2494 // opc2 indicates both toInteger & isUnsigned.
2495 op |= isUnsigned ? 0x4 : 0x5;
2496 // 'op' field in instruction is isRoundZero
2500 ASSERT(!isRoundZero);
2501 // 'op' field in instruction is isUnsigned
2505 return VFPOperand(op);
// Rewrites the 32-bit constant materialized by the 4-halfword MOVW/MOVT
// pair that ends just before 'code'. The destination-register fields are
// read back from the existing instructions so only the immediate changes.
2508 static void setInt32(void* code, uint32_t value, bool flush)
2510 uint16_t* location = reinterpret_cast<uint16_t*>(code);
// Sanity-check that a MOVW (imm T3) + MOVT pair really precedes 'code'.
2511 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2513 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
2514 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
2515 uint16_t instructions[4];
2516 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
// (location[-3] >> 8) & 0xf extracts the existing Rd so it is preserved.
2517 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
2518 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2519 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
2521 performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
// Flushed only when requested (the guarding 'if (flush)' line is elided here).
2523 cacheFlush(location - 4, 4 * sizeof(uint16_t));
// Decodes and returns the 32-bit constant encoded in the MOVW/MOVT pair
// ending just before 'code' (inverse of setInt32).
2526 static int32_t readInt32(void* code)
2528 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2529 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2531 ARMThumbImmediate lo16;
2532 ARMThumbImmediate hi16;
// Each immediate is split across the two halfwords of its instruction.
2533 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
2534 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
2535 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
2536 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
// Recombine: MOVT supplies the high halfword, MOVW the low (the '<< 16'
// line is elided from this excerpt).
2537 uint32_t result = hi16.asUInt16();
2539 result |= lo16.asUInt16();
2540 return static_cast<int32_t>(result);
// Rewrites the 5-bit immediate field (bits 6-10) of an LDR (immediate, T1)
// at 'code'. The 7-bit byte offset is stored shifted right by 2, i.e. as a
// word offset, as that encoding requires.
2543 static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
2545 // Requires us to have planted a LDR_imm_T1
2546 ASSERT(imm.isValid());
2547 ASSERT(imm.isUInt7());
2548 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2549 uint16_t instruction;
// Clear the existing imm5 field, then insert the new word offset.
2550 instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2551 instruction |= (imm.getUInt7() >> 2) << 6;
2552 performJITMemcpy(location, &instruction, sizeof(uint16_t));
2553 cacheFlush(location, sizeof(uint16_t));
// Pointer-typed convenience wrapper around setInt32 (32-bit target only).
2556 static void setPointer(void* code, void* value, bool flush)
2558 setInt32(code, reinterpret_cast<uint32_t>(value), flush);
// True if 'address' holds a 2-halfword B (T4 encoding) instruction.
2561 static bool isB(const void* address)
2563 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2564 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
// True if 'address' holds a BX instruction (register bits masked out).
2567 static bool isBX(const void* address)
2569 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2570 return (instruction[0] & 0xff87) == OP_BX;
// True if 'address' holds a MOVW (MOV immediate, T3 encoding) instruction.
2573 static bool isMOV_imm_T3(const void* address)
2575 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2576 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
// True if 'address' holds a MOVT instruction.
2579 static bool isMOVT(const void* address)
2581 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2582 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
// True if 'address' holds a 16-bit (T1) NOP.
2585 static bool isNOP_T1(const void* address)
2587 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2588 return instruction[0] == OP_NOP_T1;
// True if 'address' holds a 32-bit (T2) NOP, checked halfword by halfword.
2591 static bool isNOP_T2(const void* address)
2593 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2594 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
// Returns whether the displacement from 'instruction' to 'target' fits in a
// T1-encoding conditional branch (9-bit signed displacement).
2597 static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2599 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2600 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2602 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2603 // It does not appear to be documented in the ARM ARM (big surprise), but
2604 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2605 // less than the actual displacement.
// Shift up then arithmetic-shift back: the value survives the round trip
// only if it fits in 9 signed bits.
2607 return ((relative << 23) >> 23) == relative;
// Returns whether the displacement from 'instruction' to 'target' fits in a
// T2-encoding unconditional branch (12-bit signed displacement).
2610 static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2612 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2613 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2615 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2616 // It does not appear to be documented in the ARM ARM (big surprise), but
2617 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2618 // less than the actual displacement.
// Sign-extension round trip: true only if 'relative' fits in 12 signed bits.
2620 return ((relative << 20) >> 20) == relative;
// Returns whether the displacement fits a T3-encoding conditional branch
// (21-bit signed displacement).
2623 static bool canBeJumpT3(const uint16_t* instruction, const void* target)
2625 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2626 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2628 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
// Sign-extension round trip: true only if 'relative' fits in 21 signed bits.
2629 return ((relative << 11) >> 11) == relative;
// Returns whether the displacement fits a T4-encoding branch (B.W, 25-bit
// signed displacement).
2632 static bool canBeJumpT4(const uint16_t* instruction, const void* target)
2634 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2635 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2637 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
// Sign-extension round trip: true only if 'relative' fits in 25 signed bits.
2638 return ((relative << 7) >> 7) == relative;
// Plants a 16-bit conditional branch (B T1) to 'target' at writeTarget[-1].
// 'instruction' is the executable address used to compute the displacement;
// 'writeTarget' is where the bytes are actually written (they may differ
// under a separate writable mapping).
2641 static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2643 // FIXME: this should be up in the MacroAssembler layer. :-(
2644 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2645 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2646 ASSERT(canBeJumpT1(instruction, target));
2648 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2649 // It does not appear to be documented in the ARM ARM (big surprise), but
2650 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2651 // less than the actual displacement.
2654 // All branch offsets should be an even distance.
2655 ASSERT(!(relative & 1));
// Condition goes in bits 8-11; the displacement is stored in halfwords.
2656 uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2657 performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
// Plants a 16-bit unconditional branch (B T2) to 'target' at writeTarget[-1].
2660 static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2662 // FIXME: this should be up in the MacroAssembler layer. :-(
2663 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2664 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2665 ASSERT(canBeJumpT2(instruction, target));
2667 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2668 // It does not appear to be documented in the ARM ARM (big surprise), but
2669 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2670 // less than the actual displacement.
2673 // All branch offsets should be an even distance.
2674 ASSERT(!(relative & 1));
// The displacement is stored in halfwords in the low 11 bits.
2675 uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
2676 performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
// Plants a 32-bit conditional branch (B T3) to 'target' in the two halfwords
// ending at 'writeTarget'. The displacement bits are scattered across both
// halfwords per the T3 encoding.
2679 static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2681 // FIXME: this should be up in the MacroAssembler layer. :-(
2682 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2683 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2684 ASSERT(canBeJumpT3(instruction, target));
2686 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2688 // All branch offsets should be an even distance.
2689 ASSERT(!(relative & 1));
2690 uint16_t instructions[2];
2691 instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2692 instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2693 performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
// Plants a 32-bit unconditional branch (B.W, T4 encoding) to 'target' in the
// two halfwords ending at 'writeTarget'.
2696 static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2698 // FIXME: this should be up in the MacroAssembler layer. :-(
2699 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2700 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2701 ASSERT(canBeJumpT4(instruction, target));
2703 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2704 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
// Flip bits 22-23 (when the sign bit is clear; the guarding condition line
// is elided here) to produce the J1/J2 fields of the T4 encoding.
2706 relative ^= 0xC00000;
2708 // All branch offsets should be an even distance.
2709 ASSERT(!(relative & 1));
2710 uint16_t instructions[2];
2711 instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2712 instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2713 performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
// Makes a T4 branch conditional by planting an IT instruction one halfword
// before the 2-halfword branch (hence writeTarget - 3), then linking the
// branch itself.
2716 static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2718 // FIXME: this should be up in the MacroAssembler layer. :-(
2719 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2720 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2722 uint16_t newInstruction = ifThenElse(cond) | OP_IT;
2723 performJITMemcpy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
2724 linkJumpT4(writeTarget, instruction, target);
// Plants an unlimited-range jump: MOVW/MOVT materializes 'target' (with the
// Thumb bit set via '+ 1') into the scratch register ip, followed by BX ip.
// Occupies the 5 halfwords ending at 'writeTarget'.
2727 static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2729 // FIXME: this should be up in the MacroAssembler layer. :-(
2730 ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
2731 ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
2732 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2734 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
// '+ 1' sets the low bit so BX stays in Thumb state.
2735 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2736 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2737 uint16_t instructions[5];
2738 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2739 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2740 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2741 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2742 instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2744 performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
// Conditional version of linkBX: plants the 5-halfword MOVW/MOVT/BX sequence,
// then an IT instruction at writeTarget - 6 predicating it. ifThenElse(cond,
// true, true) covers all three predicated instructions of the sequence.
2747 static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2749 // FIXME: this should be up in the MacroAssembler layer. :-(
2750 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2751 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2753 linkBX(writeTarget, instruction, target);
2754 uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
2755 performJITMemcpy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
// Links a 5-halfword jump slot to 'target', choosing the cheap form (NOPs +
// B.W) when the target is in T4 range, or the full MOVW/MOVT/BX sequence
// otherwise. The slot must currently hold one of those two shapes.
2758 static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2760 // FIXME: this should be up in the MacroAssembler layer. :-(
2761 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2762 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2764 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2765 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2767 if (canBeJumpT4(instruction, target)) {
2768 // There may be a better way to fix this, but right now put the NOPs first, since in the
2769 // case of a conditional branch this will be coming after an ITTT predicating *three*
2770 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2771 // variable width encoding - the previous instruction might *look* like an ITTT but
2772 // actually be the second half of a 2-word op.
2773 uint16_t instructions[3];
2774 instructions[0] = OP_NOP_T1;
2775 instructions[1] = OP_NOP_T2a;
2776 instructions[2] = OP_NOP_T2b;
2777 performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
// The B.W occupies the final two halfwords of the slot.
2778 linkJumpT4(writeTarget, instruction, target);
// Out-of-range fallback: same MOVW/MOVT/BX sequence as linkBX.
2780 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2781 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2782 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2784 uint16_t instructions[5];
2785 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2786 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2787 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2788 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2789 instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2790 performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
// First halfword of a MOVW/MOVT-style encoding: opcode plus the 'i' bit
// (bit 10) and imm4 field of the Thumb immediate.
2794 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2796 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
// Extracts the 'i' bit and imm4 field from the first halfword (inverse of
// twoWordOp5i6Imm4Reg4EncodedImmFirst).
2799 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2801 result.m_value.i = (value >> 10) & 1;
2802 result.m_value.imm4 = value & 15;
// Second halfword: imm3 field (bits 12-14), destination register (bits 8-11),
// and imm8 field of the Thumb immediate.
2805 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2807 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
// Extracts the imm3 and imm8 fields from the second halfword (inverse of
// twoWordOp5i6Imm4Reg4EncodedImmSecond; the register field is ignored).
2810 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2812 result.m_value.imm3 = (value >> 12) & 7;
2813 result.m_value.imm8 = value & 255;
// Low-level emitter: each method packs operands into one or two 16-bit
// halfwords per a named Thumb/Thumb-2 encoding shape and appends them to the
// assembler buffer. Method names describe the field layout (field widths in
// bit counts, listed high to low).
2816 class ARMInstructionFormatter {
2818 ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2820 m_buffer.putShort(op | (rd << 8) | imm);
2823 ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2825 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2828 ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2830 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2833 ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
2835 m_buffer.putShort(op | imm);
2838 ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2840 m_buffer.putShort(op | imm);
// '143' layout: reg2's high bit is split out to bit 7, its low 3 bits go in
// bits 0-2 (used by encodings taking one high/low register).
2843 ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2845 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2848 ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2850 m_buffer.putShort(op | imm);
2853 ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2855 m_buffer.putShort(op | (reg1 << 3) | reg2);
// Two-halfword forms below: first halfword carries the opcode (and possibly
// a register), second carries the remaining operand fields.
2858 ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2860 m_buffer.putShort(op | reg);
2861 m_buffer.putShort(ff.m_u.value);
2864 ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2866 m_buffer.putShort(op);
2867 m_buffer.putShort(ff.m_u.value);
2870 ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2872 m_buffer.putShort(op1);
2873 m_buffer.putShort(op2);
2876 ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
2878 m_buffer.putShort(op1);
2879 m_buffer.putShort(imm);
// MOVW/MOVT-style encoded immediate with an explicit imm4 override.
2882 ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2884 ARMThumbImmediate newImm = imm;
2885 newImm.m_value.imm4 = imm4;
2887 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2888 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2891 ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2893 m_buffer.putShort(op | reg1);
2894 m_buffer.putShort((reg2 << 12) | imm);
2897 ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
2899 m_buffer.putShort(op | reg1);
2900 m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
2903 // Formats up instructions of the pattern:
2904 // 111111111B11aaaa:bbbb222SA2C2cccc
2905 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2906 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2907 ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
// Opcodes must leave the operand bit positions clear.
2909 ASSERT(!(op1 & 0x004f))
2910 ASSERT(!(op2 & 0xf1af));
2911 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2912 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2915 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2916 // (i.e. +/-(0..255) 32-bit words)
2917 ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
// NOTE(review): the lines computing 'up' and normalizing a negative 'imm'
// are elided from this excerpt.
2925 uint32_t offset = imm;
2926 ASSERT(!(offset & ~0x3fc));
2929 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2930 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2933 // Administrative methods:
2935 size_t codeSize() const { return m_buffer.codeSize(); }
2936 AssemblerLabel label() const { return m_buffer.label(); }
2937 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2938 void* data() const { return m_buffer.data(); }
2940 unsigned debugOffset() { return m_buffer.debugOffset(); }
2942 AssemblerBuffer m_buffer;
// Jumps recorded during assembly, resolved when the code is finalized.
2945 Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
// Bookkeeping for the most recently emitted watchpoint (indices into the
// buffer; exact use defined elsewhere in this class).
2946 int m_indexOfLastWatchpoint;
2947 int m_indexOfTailOfLastWatchpoint;
2952 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2954 #endif // ARMAssembler_h