2 * Copyright (C) 2009-2019 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31 #include "AssemblerBuffer.h"
32 #include "AssemblerCommon.h"
33 #include "RegisterInfo.h"
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
// Register name definitions for the ARMv7/Thumb-2 assembler.
// NOTE(review): this listing has elided lines (braces, blank lines and the
// enum typedef closers are missing from view).
41 namespace RegisterNames {
// General-purpose registers, generated from the FOR_EACH_GP_REGISTER X-macro;
// symbolic aliases are folded in via FOR_EACH_REGISTER_ALIAS.
43 typedef enum : int8_t {
44 #define REGISTER_ID(id, name, r, cs) id,
45 FOR_EACH_GP_REGISTER(REGISTER_ID)
48 #define REGISTER_ALIAS(id, name, alias) id = alias,
49 FOR_EACH_REGISTER_ALIAS(REGISTER_ALIAS)
// Special-purpose registers (apsr/fpscr — see first/lastSPRegister below).
54 typedef enum : int8_t {
55 #define REGISTER_ID(id, name) id,
56 FOR_EACH_SP_REGISTER(REGISTER_ID)
// Single-precision floating-point registers.
60 typedef enum : int8_t {
61 #define REGISTER_ID(id, name, r, cs) id,
62 FOR_EACH_FP_SINGLE_REGISTER(REGISTER_ID)
// Double-precision floating-point registers.
66 typedef enum : int8_t {
67 #define REGISTER_ID(id, name, r, cs) id,
68 FOR_EACH_FP_DOUBLE_REGISTER(REGISTER_ID)
// Quad registers; guarded by CPU(ARM_NEON) (the #if is elided in this listing).
74 typedef enum : int8_t {
75 #define REGISTER_ID(id, name, r, cs) id,
76 FOR_EACH_FP_QUAD_REGISTER(REGISTER_ID)
79 #endif // CPU(ARM_NEON)
// A double register dN overlays two single registers s(2N) and s(2N+1):
// asSingle yields the low half, asSingleUpper the high half, and asDouble the
// double register containing a given single.
81 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
84 return (FPSingleRegisterID)(reg << 1);
87 inline FPSingleRegisterID asSingleUpper(FPDoubleRegisterID reg)
90 return (FPSingleRegisterID)((reg << 1) + 1);
93 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
96 return (FPDoubleRegisterID)(reg >> 1);
99 } // namespace ARMRegisters
101 class ARMv7Assembler;
102 class ARMThumbImmediate {
103 friend class ARMv7Assembler;
105 typedef uint8_t ThumbImmediateType;
106 static const ThumbImmediateType TypeInvalid = 0;
107 static const ThumbImmediateType TypeEncoded = 1;
108 static const ThumbImmediateType TypeUInt16 = 2;
118 // If this is an encoded immediate, then it may describe a shift, or a pattern.
120 unsigned shiftValue7 : 7;
121 unsigned shiftAmount : 5;
124 unsigned immediate : 8;
125 unsigned pattern : 4;
127 } ThumbImmediateValue;
129 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
140 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
142 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
143 value >>= N; /* if any were set, lose the bottom N */
144 else /* if none of the top N bits are set, */
145 zeros += N; /* then we have identified N leading zeros */
// Counts the leading zero bits of a 32-bit value by successive halving,
// delegating each step (16, 8, 4, 2, 1) to countLeadingZerosPartial.
// NOTE(review): the zero-check, accumulator declaration and return are elided
// from this listing.
148 static int32_t countLeadingZeros(uint32_t value)
154 countLeadingZerosPartial(value, zeros, 16);
155 countLeadingZerosPartial(value, zeros, 8);
156 countLeadingZerosPartial(value, zeros, 4);
157 countLeadingZerosPartial(value, zeros, 2);
158 countLeadingZerosPartial(value, zeros, 1);
// Default constructor produces an invalid immediate (isValid() == false).
163 : m_type(TypeInvalid)
// Private constructors used by the static make* factories below.
168 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
174 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
177 // Make sure this constructor is only reached with type TypeUInt16;
178 // this extra parameter makes the code a little clearer by making it
179 // explicit at call sites which type is being constructed
180 ASSERT_UNUSED(type, type == TypeUInt16);
182 m_value.asInt = value;
// Attempts to encode an arbitrary 32-bit value as a Thumb-2 modified
// immediate: a plain byte (pattern 0), a rotated byte, or a replicated byte
// pattern. Returns an invalid immediate when the value is not representable.
186 static ARMThumbImmediate makeEncodedImm(uint32_t value)
188 ThumbImmediateValue encoding;
191 // okay, these are easy.
// Values below 256 fit directly as an untransformed byte.
193 encoding.immediate = value;
194 encoding.pattern = 0;
195 return ARMThumbImmediate(TypeEncoded, encoding);
198 int32_t leadingZeros = countLeadingZeros(value);
199 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
200 ASSERT(leadingZeros < 24);
202 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
203 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
204 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
205 int32_t rightShiftAmount = 24 - leadingZeros;
206 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
207 // Shift the value down to the low byte position. The assign to
208 // shiftValue7 drops the implicit top bit.
209 encoding.shiftValue7 = value >> rightShiftAmount;
210 // The encoded shift amount is the magnitude of a right rotate.
211 encoding.shiftAmount = 8 + leadingZeros;
212 return ARMThumbImmediate(TypeEncoded, encoding);
// Replicated byte patterns: all four bytes equal (pattern 3), bytes 0 and 2
// equal with 1 and 3 zero (pattern 1), bytes 1 and 3 equal with 0 and 2 zero
// (pattern 2).
218 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
219 encoding.immediate = bytes.byte0;
220 encoding.pattern = 3;
221 return ARMThumbImmediate(TypeEncoded, encoding);
224 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
225 encoding.immediate = bytes.byte0;
226 encoding.pattern = 1;
227 return ARMThumbImmediate(TypeEncoded, encoding);
230 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
231 encoding.immediate = bytes.byte1;
232 encoding.pattern = 2;
233 return ARMThumbImmediate(TypeEncoded, encoding);
// No representation found: return an invalid immediate.
236 return ARMThumbImmediate();
// Valid only when the value fits in 12 unsigned bits.
239 static ARMThumbImmediate makeUInt12(int32_t value)
241 return (!(value & 0xfffff000))
242 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
243 : ARMThumbImmediate();
246 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
248 // If this is not a 12-bit unsigned int, try making an encoded immediate.
249 return (!(value & 0xfffff000))
250 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
251 : makeEncodedImm(value);
254 // The 'make' methods, above, return a !isValid() value if the argument
255 // cannot be represented as the requested type. This method is called
256 // 'get' since the argument can always be represented.
257 static ARMThumbImmediate makeUInt16(uint16_t value)
259 return ARMThumbImmediate(TypeUInt16, value);
// isValid: true unless construction failed to find an encoding.
264 return m_type != TypeInvalid;
267 uint16_t asUInt16() const { return m_value.asInt; }
269 // These methods rely on the format of encoded byte values.
// isUIntN: does the stored 16-bit value fit in N bits? The wider widths
// (9/10/12/16) additionally require the TypeUInt16 discriminator.
270 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
271 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
272 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
273 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
274 bool isUInt7() { return !(m_value.asInt & 0xff80); }
275 bool isUInt8() { return !(m_value.asInt & 0xff00); }
276 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
277 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
278 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
279 bool isUInt16() { return m_type == TypeUInt16; }
// getUIntN: asserts the matching isUIntN and returns the raw value.
280 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
281 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
282 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
283 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
284 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
285 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
286 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
287 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
288 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
289 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
291 bool isEncodedImm() { return m_type == TypeEncoded; }
// Discriminator plus the raw/encoded payload.
294 ThumbImmediateType m_type;
295 ThumbImmediateValue m_value;
// RRX shares ROR's type encoding (it is distinguished by a zero amount).
304 SRType_RRX = SRType_ROR
// Packs a shift type and amount into the split hi4/lo4 nibbles used by
// Thumb-2 shifted-register operand encodings.
// NOTE(review): the union declaration and constructor bodies are elided here.
307 class ShiftTypeAndAmount {
308 friend class ARMv7Assembler;
// Default constructor: type 0 (presumably SRType_LSL — TODO confirm) with an
// elided amount initialization.
313 m_u.type = (ARMShiftType)0;
317 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
// Shift amounts are taken modulo 32.
320 m_u.amount = amount & 31;
// Accessors for the two 4-bit halves of the encoded operand.
323 unsigned lo4() { return m_u.lo4; }
324 unsigned hi4() { return m_u.hi4; }
// The ARMv7 (Thumb-2) assembler: emits instruction encodings into an
// AssemblerBuffer via m_formatter. NOTE(review): lines are elided throughout.
339 class ARMv7Assembler {
341 typedef ARMRegisters::RegisterID RegisterID;
342 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
343 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
// Quad registers (the CPU(ARM_NEON) guard is elided in this listing).
345 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
347 typedef ARMRegisters::SPRegisterID SPRegisterID;
// The generic FP register type used by clients is double-precision.
348 typedef FPDoubleRegisterID FPRegisterID;
// GP registers span r0..r15 inclusive.
350 static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
351 static constexpr RegisterID lastRegister() { return ARMRegisters::r15; }
352 static constexpr unsigned numberOfRegisters() { return lastRegister() - firstRegister() + 1; }
// Special-purpose registers span apsr..fpscr.
354 static constexpr SPRegisterID firstSPRegister() { return ARMRegisters::apsr; }
355 static constexpr SPRegisterID lastSPRegister() { return ARMRegisters::fpscr; }
356 static constexpr unsigned numberOfSPRegisters() { return lastSPRegister() - firstSPRegister() + 1; }
// FP registers: d0..d31 with NEON or VFPv3-D32, otherwise d0..d15.
358 static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
359 #if CPU(ARM_NEON) || CPU(ARM_VFP_V3_D32)
360 static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
362 static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d15; }
364 static constexpr unsigned numberOfFPRegisters() { return lastFPRegister() - firstFPRegister() + 1; }
// Debug-name lookups; the tables are generated from the same X-macros as the
// register enums, so names and ids stay in sync by construction.
366 static const char* gprName(RegisterID id)
368 ASSERT(id >= firstRegister() && id <= lastRegister());
369 static const char* const nameForRegister[numberOfRegisters()] = {
370 #define REGISTER_NAME(id, name, r, cs) name,
371 FOR_EACH_GP_REGISTER(REGISTER_NAME)
374 return nameForRegister[id];
377 static const char* sprName(SPRegisterID id)
379 ASSERT(id >= firstSPRegister() && id <= lastSPRegister());
380 static const char* const nameForRegister[numberOfSPRegisters()] = {
381 #define REGISTER_NAME(id, name) name,
382 FOR_EACH_SP_REGISTER(REGISTER_NAME)
385 return nameForRegister[id];
388 static const char* fprName(FPRegisterID id)
390 ASSERT(id >= firstFPRegister() && id <= lastFPRegister());
391 static const char* const nameForRegister[numberOfFPRegisters()] = {
392 #define REGISTER_NAME(id, name, r, cs) name,
393 FOR_EACH_FP_DOUBLE_REGISTER(REGISTER_NAME)
396 return nameForRegister[id];
// Condition codes in architectural encoding order (EQ=0 .. AL=14); the first
// two comment lines map ARM mnemonics to their x86-style equivalents.
399 // (HS, LO, HI, LS) -> (AE, B, A, BE)
400 // (VS, VC) -> (O, NO)
402 ConditionEQ, // Zero / Equal.
403 ConditionNE, // Non-zero / Not equal.
404 ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
405 ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
406 ConditionMI, // Negative.
407 ConditionPL, // Positive or zero.
408 ConditionVS, // Overflowed.
409 ConditionVC, // Not overflowed.
410 ConditionHI, // Unsigned higher.
411 ConditionLS, // Unsigned lower or same.
412 ConditionGE, // Signed greater than or equal.
413 ConditionLT, // Signed less than.
414 ConditionGT, // Signed greater than.
415 ConditionLE, // Signed less than or equal.
416 ConditionAL, // Unconditional / Always execute.
// Jump/link enum values pack two fields: the low 3 bits are an index, the
// remaining bits the (maximum or actual) code size in bytes.
420 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
421 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
// JumpType: what the client requested (conditional or not, fixed-size or
// shrinkable), with the worst-case size in 16-bit halfwords.
422 enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
423 JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
424 JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
425 JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
426 JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
// JumpLinkType: the concrete branch encoding chosen at link time, with its
// exact size.
429 LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
430 LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
431 LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
432 LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
433 LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
434 LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
435 LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
436 LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
// LinkRecord: a pending branch to be resolved at link time. It is stored in a
// union of "real" bit-field members and raw words so that assignment can copy
// the packed words directly instead of each bit-field.
441 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
443 data.realTypes.m_from = from;
444 data.realTypes.m_to = to;
445 data.realTypes.m_type = type;
446 data.realTypes.m_linkType = LinkInvalid;
447 data.realTypes.m_condition = condition;
// Word-wise copy of the packed representation (see COMPILE_ASSERT below).
449 void operator=(const LinkRecord& other)
451 data.copyTypes.content[0] = other.data.copyTypes.content[0];
452 data.copyTypes.content[1] = other.data.copyTypes.content[1];
453 data.copyTypes.content[2] = other.data.copyTypes.content[2];
455 intptr_t from() const { return data.realTypes.m_from; }
456 void setFrom(intptr_t from) { data.realTypes.m_from = from; }
457 intptr_t to() const { return data.realTypes.m_to; }
458 JumpType type() const { return data.realTypes.m_type; }
459 JumpLinkType linkType() const { return data.realTypes.m_linkType; }
// The link type may only be set once, after the linker picks an encoding.
460 void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
461 Condition condition() const { return data.realTypes.m_condition; }
// Bit-field layout of the "real" view (other fields elided in this listing).
465 intptr_t m_from : 31;
468 JumpLinkType m_linkType : 8;
469 Condition m_condition : 16;
// Guarantees the word-wise operator= above copies every bit-field.
474 COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
// Assembler constructor fragment: watchpoint bookkeeping starts at INT_MIN.
479 : m_indexOfLastWatchpoint(INT_MIN)
480 , m_indexOfTailOfLastWatchpoint(INT_MIN)
484 AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
// "Bad" registers for most Thumb-2 encodings: sp and pc are disallowed.
489 static bool BadReg(RegisterID reg)
491 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
// Splits a single-precision register number into the high-bits field plus a
// low bit, as VFP encodings require. NOTE(review): the conditional setting
// the low bit is elided from this listing — presumably guarded on (rdNum & 1).
494 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
496 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
498 rdMask |= 1 << lowBitShift;
// Splits a double-precision register number into a 4-bit field plus a high
// bit. NOTE(review): the guard before setting the high bit is elided —
// presumably (rdNum & 16).
502 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
504 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
506 rdMask |= 1 << highBitShift;
// Opcode constants, named OP_<mnemonic>_<operand form>_<encoding variant>.
// Values up to 0xE7FF are complete 16-bit instructions; higher values are the
// first halfword of 32-bit Thumb-2 instructions. The *b-suffixed constants
// further below are second halfwords. NOTE(review): enum heads/closers and
// some entries are elided in this listing.
511 OP_ADD_reg_T1 = 0x1800,
512 OP_SUB_reg_T1 = 0x1A00,
513 OP_ADD_imm_T1 = 0x1C00,
514 OP_SUB_imm_T1 = 0x1E00,
515 OP_MOV_imm_T1 = 0x2000,
516 OP_CMP_imm_T1 = 0x2800,
517 OP_ADD_imm_T2 = 0x3000,
518 OP_SUB_imm_T2 = 0x3800,
519 OP_AND_reg_T1 = 0x4000,
520 OP_EOR_reg_T1 = 0x4040,
521 OP_TST_reg_T1 = 0x4200,
522 OP_RSB_imm_T1 = 0x4240,
523 OP_CMP_reg_T1 = 0x4280,
524 OP_ORR_reg_T1 = 0x4300,
525 OP_MVN_reg_T1 = 0x43C0,
526 OP_ADD_reg_T2 = 0x4400,
527 OP_MOV_reg_T1 = 0x4600,
// 16-bit load/store with register offset.
530 OP_STR_reg_T1 = 0x5000,
531 OP_STRH_reg_T1 = 0x5200,
532 OP_STRB_reg_T1 = 0x5400,
533 OP_LDRSB_reg_T1 = 0x5600,
534 OP_LDR_reg_T1 = 0x5800,
535 OP_LDRH_reg_T1 = 0x5A00,
536 OP_LDRB_reg_T1 = 0x5C00,
537 OP_LDRSH_reg_T1 = 0x5E00,
// 16-bit load/store with immediate offset, plus SP-relative forms.
538 OP_STR_imm_T1 = 0x6000,
539 OP_LDR_imm_T1 = 0x6800,
540 OP_STRB_imm_T1 = 0x7000,
541 OP_LDRB_imm_T1 = 0x7800,
542 OP_STRH_imm_T1 = 0x8000,
543 OP_LDRH_imm_T1 = 0x8800,
544 OP_STR_imm_T2 = 0x9000,
545 OP_LDR_imm_T2 = 0x9800,
546 OP_ADD_SP_imm_T1 = 0xA800,
547 OP_ADD_SP_imm_T2 = 0xB000,
548 OP_SUB_SP_imm_T1 = 0xB080,
// First halfwords of 32-bit data-processing (register) instructions.
561 OP_AND_reg_T2 = 0xEA00,
562 OP_TST_reg_T2 = 0xEA10,
563 OP_ORR_reg_T2 = 0xEA40,
564 OP_ORR_S_reg_T2 = 0xEA50,
// The immediate-shift forms all share the MOV(shifted) first halfword; the
// shift type is distinguished in the second halfword.
565 OP_ASR_imm_T1 = 0xEA4F,
566 OP_LSL_imm_T1 = 0xEA4F,
567 OP_LSR_imm_T1 = 0xEA4F,
568 OP_ROR_imm_T1 = 0xEA4F,
569 OP_MVN_reg_T2 = 0xEA6F,
570 OP_EOR_reg_T2 = 0xEA80,
571 OP_ADD_reg_T3 = 0xEB00,
572 OP_ADD_S_reg_T3 = 0xEB10,
573 OP_SUB_reg_T2 = 0xEBA0,
574 OP_SUB_S_reg_T2 = 0xEBB0,
575 OP_CMP_reg_T2 = 0xEBB0,
// VFP/NEON first halfwords.
576 OP_VMOV_CtoD = 0xEC00,
577 OP_VMOV_DtoC = 0xEC10,
582 OP_VMOV_CtoS = 0xEE00,
583 OP_VMOV_StoC = 0xEE10,
// Several VFP ops share the 0xEEB0 first halfword; the second halfword
// disambiguates them.
590 OP_VCVT_FPIVFP = 0xEEB0,
592 OP_VMOV_IMM_T2 = 0xEEB0,
595 OP_VSQRT_T1 = 0xEEB0,
596 OP_VCVTSD_T1 = 0xEEB0,
597 OP_VCVTDS_T1 = 0xEEB0,
// First halfwords of 32-bit data-processing (immediate) instructions.
600 OP_AND_imm_T1 = 0xF000,
602 OP_ORR_imm_T1 = 0xF040,
603 OP_MOV_imm_T2 = 0xF040,
605 OP_EOR_imm_T1 = 0xF080,
606 OP_ADD_imm_T3 = 0xF100,
607 OP_ADD_S_imm_T3 = 0xF110,
610 OP_SUB_imm_T3 = 0xF1A0,
611 OP_SUB_S_imm_T3 = 0xF1B0,
612 OP_CMP_imm_T2 = 0xF1B0,
613 OP_RSB_imm_T2 = 0xF1C0,
614 OP_RSB_S_imm_T2 = 0xF1D0,
615 OP_ADD_imm_T4 = 0xF200,
616 OP_MOV_imm_T3 = 0xF240,
617 OP_SUB_imm_T4 = 0xF2A0,
// First halfwords of 32-bit load/store instructions.
622 OP_STRB_imm_T3 = 0xF800,
623 OP_STRB_reg_T2 = 0xF800,
624 OP_LDRB_imm_T3 = 0xF810,
625 OP_LDRB_reg_T2 = 0xF810,
626 OP_STRH_imm_T3 = 0xF820,
627 OP_STRH_reg_T2 = 0xF820,
628 OP_LDRH_reg_T2 = 0xF830,
629 OP_LDRH_imm_T3 = 0xF830,
630 OP_STR_imm_T4 = 0xF840,
631 OP_STR_reg_T2 = 0xF840,
632 OP_LDR_imm_T4 = 0xF850,
633 OP_LDR_reg_T2 = 0xF850,
634 OP_STRB_imm_T2 = 0xF880,
635 OP_LDRB_imm_T2 = 0xF890,
636 OP_STRH_imm_T2 = 0xF8A0,
637 OP_LDRH_imm_T2 = 0xF8B0,
638 OP_STR_imm_T3 = 0xF8C0,
639 OP_LDR_imm_T3 = 0xF8D0,
640 OP_LDRSB_reg_T2 = 0xF910,
641 OP_LDRSH_reg_T2 = 0xF930,
642 OP_LSL_reg_T2 = 0xFA00,
643 OP_LSR_reg_T2 = 0xFA20,
644 OP_ASR_reg_T2 = 0xFA40,
645 OP_ROR_reg_T2 = 0xFA60,
647 OP_SMULL_T1 = 0xFB80,
648 #if HAVE(ARM_IDIV_INSTRUCTIONS)
// Second halfwords (b suffix) of the 32-bit instructions above.
656 OP_VADD_T2b = 0x0A00,
660 OP_VMOV_IMM_T2b = 0x0A00,
661 OP_VMOV_T2b = 0x0A40,
662 OP_VMUL_T2b = 0x0A00,
665 OP_VMOV_StoCb = 0x0A10,
666 OP_VMOV_CtoSb = 0x0A10,
667 OP_VMOV_DtoCb = 0x0A10,
668 OP_VMOV_CtoDb = 0x0A10,
670 OP_VABS_T2b = 0x0A40,
672 OP_VCVT_FPIVFPb = 0x0A40,
673 OP_VNEG_T2b = 0x0A40,
674 OP_VSUB_T2b = 0x0A40,
675 OP_VSQRT_T1b = 0x0A40,
676 OP_VCVTSD_T1b = 0x0A40,
677 OP_VCVTDS_T1b = 0x0A40,
679 OP_DMB_SY_T1b = 0x8F5F,
680 OP_DMB_ISHST_T1b = 0x8F5A,
// FourFours packs four 4-bit fields (typically a second halfword's operand
// nibbles). NOTE(review): the union/body is elided in this listing.
686 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
705 class ARMInstructionFormatter;
708 static bool ifThenElseConditionBit(Condition condition, bool isIf)
710 return isIf ? (condition & 1) : !(condition & 1);
// ifThenElse overloads build the 8-bit operand of an IT instruction: the high
// nibble is the base condition, the low nibble a mask whose bits (via
// ifThenElseConditionBit) select then/else for up to three following
// instructions, terminated by a set bit. NOTE(review): the terminator-bit
// lines of each mask expression are elided in this listing.
712 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
714 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
715 | (ifThenElseConditionBit(condition, inst3if) << 2)
716 | (ifThenElseConditionBit(condition, inst4if) << 1)
// AL cannot take "else" slots: its mask must have at most one bit set.
718 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
719 return (condition << 4) | mask;
721 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
723 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
724 | (ifThenElseConditionBit(condition, inst3if) << 2)
726 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
727 return (condition << 4) | mask;
729 static uint8_t ifThenElse(Condition condition, bool inst2if)
731 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
733 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
734 return (condition << 4) | mask;
// Single-instruction IT block: mask is just the terminator.
737 static uint8_t ifThenElse(Condition condition)
740 return (condition << 4) | mask;
// adc: add with carry, encoded immediate form only.
745 void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
747 // Rd can only be SP if Rn is also SP.
748 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
749 ASSERT(rd != ARMRegisters::pc);
750 ASSERT(rn != ARMRegisters::pc);
751 ASSERT(imm.isEncodedImm());
753 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
// add (immediate): picks the shortest encoding that fits — SP-relative T1/T2,
// 16-bit T1/T2, then 32-bit T3 (encoded imm) or T4 (12-bit imm).
// NOTE(review): several branch/else lines are elided in this listing.
756 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
758 // Rd can only be SP if Rn is also SP.
759 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
760 ASSERT(rd != ARMRegisters::pc);
761 ASSERT(rn != ARMRegisters::pc);
762 ASSERT(imm.isValid());
// SP-relative forms require a word-aligned immediate (encoded divided by 4).
764 if (rn == ARMRegisters::sp && imm.isUInt16()) {
765 ASSERT(!(imm.getUInt16() & 3));
766 if (!(rd & 8) && imm.isUInt10()) {
767 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
769 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
770 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
// Low registers (r0-r7) with a tiny immediate: 16-bit encodings.
773 } else if (!((rd | rn) & 8)) {
775 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
777 } else if ((rd == rn) && imm.isUInt8()) {
778 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
// Fall back to the 32-bit encodings.
783 if (imm.isEncodedImm())
784 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
786 ASSERT(imm.isUInt12());
787 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
// add (shifted register), 32-bit T3 encoding.
791 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
793 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
794 ASSERT(rd != ARMRegisters::pc);
795 ASSERT(rn != ARMRegisters::pc);
797 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
800 // NOTE: In an IT block, add doesn't modify the flags register.
// add (register): prefers the 16-bit T1/T2 encodings, falling back to the
// shifted-register form with no shift.
801 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
803 if (rd == ARMRegisters::sp) {
809 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
811 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
812 else if (!((rd | rn | rm) & 8))
813 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
815 add(rd, rn, rm, ShiftTypeAndAmount());
818 // Not allowed in an IT (if then) block.
// add_S: flag-setting add (immediate). The 16-bit encodings set flags
// outside an IT block, so they are usable here too.
819 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
821 // Rd can only be SP if Rn is also SP.
822 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
823 ASSERT(rd != ARMRegisters::pc);
824 ASSERT(rn != ARMRegisters::pc);
825 ASSERT(imm.isEncodedImm());
827 if (!((rd | rn) & 8)) {
829 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
831 } else if ((rd == rn) && imm.isUInt8()) {
832 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
837 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
840 // Not allowed in an IT (if then) block?
// add_S (shifted register), 32-bit flag-setting T3 encoding.
841 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
843 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
844 ASSERT(rd != ARMRegisters::pc);
845 ASSERT(rn != ARMRegisters::pc);
847 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
850 // Not allowed in an IT (if then) block.
// add_S (register): 16-bit T1 when all registers are low, else shifted form.
851 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
853 if (!((rd | rn | rm) & 8))
854 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
856 add_S(rd, rn, rm, ShiftTypeAndAmount());
// ARM_and: bitwise AND ("and" is a C++ keyword, hence the prefix).
// Immediate form, encoded-immediate only.
859 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
863 ASSERT(imm.isEncodedImm());
864 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
// Shifted-register form, 32-bit T2 encoding.
867 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
872 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Register form: the 16-bit T1 encoding requires rd to equal one operand and
// all registers low; AND is commutative so the operands may be swapped.
875 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
877 if ((rd == rn) && !((rd | rm) & 8))
878 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd)
879 else if ((rd == rm) && !((rd | rn) & 8))
880 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
882 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
// asr: arithmetic shift right by immediate (shares the MOV-shifted encoding)
// and by register.
885 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
889 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
890 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
893 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
898 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
901 // Only allowed in IT (if then) block if last instruction.
// b: unconditional branch, 32-bit T4 encoding; returns a label for linking.
902 ALWAYS_INLINE AssemblerLabel b()
904 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
905 return m_formatter.label();
908 // Only allowed in IT (if then) block if last instruction.
// blx: branch with link and exchange to a register target.
909 ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
911 ASSERT(rm != ARMRegisters::pc);
912 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
913 return m_formatter.label();
916 // Only allowed in IT (if then) block if last instruction.
// bx: branch and exchange to a register target.
917 ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
919 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
920 return m_formatter.label();
// bkpt: software breakpoint with an 8-bit immediate payload.
923 void bkpt(uint8_t imm = 0)
925 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
928 static bool isBkpt(void* address)
930 unsigned short expected = OP_BKPT;
931 unsigned short immediateMask = 0xff;
932 unsigned short candidateInstruction = *reinterpret_cast<unsigned short*>(address);
933 return (candidateInstruction & ~immediateMask) == expected;
// clz: count leading zeros. Note rm appears in both halfword operand slots,
// as the encoding requires.
936 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
940 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
// cmn: compare negative (adds and sets flags); Rd slot is 0xf (pc) per the
// encoding for flag-only results.
943 ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
945 ASSERT(rn != ARMRegisters::pc);
946 ASSERT(imm.isEncodedImm());
948 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
// cmp (immediate): 16-bit T1 for a low register with an 8-bit immediate,
// else 32-bit T2 with an encoded immediate.
951 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
953 ASSERT(rn != ARMRegisters::pc);
954 ASSERT(imm.isEncodedImm());
956 if (!(rn & 8) && imm.isUInt8())
957 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
959 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
// cmp (shifted register), 32-bit T2 encoding.
962 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
964 ASSERT(rn != ARMRegisters::pc);
966 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
// cmp (register): 16-bit T1 when possible, else the shifted form.
// NOTE(review): the guard choosing between these two lines is elided here.
969 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
972 cmp(rn, rm, ShiftTypeAndAmount());
974 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
977 // xor is not spelled with an 'e'. :-(
// eor (immediate), encoded-immediate only.
978 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
982 ASSERT(imm.isEncodedImm());
983 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
986 // xor is not spelled with an 'e'. :-(
// eor (shifted register), 32-bit T2 encoding.
987 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
992 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
995 // xor is not spelled with an 'e'. :-(
// eor (register): 16-bit T1 needs rd equal to an operand and low registers;
// EOR is commutative so the operands may be swapped to satisfy that.
996 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
998 if ((rd == rn) && !((rd | rm) & 8))
999 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1000 else if ((rd == rm) && !((rd | rn) & 8))
1001 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1003 eor(rd, rn, rm, ShiftTypeAndAmount());
// it: emit an IT (if-then) instruction covering 1-4 following instructions;
// the overloads add then/else flags for instructions 2-4.
1006 ALWAYS_INLINE void it(Condition cond)
1008 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
1011 ALWAYS_INLINE void it(Condition cond, bool inst2if)
1013 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
1016 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
1018 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
1021 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
1023 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
1026 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// ldr (immediate): 16-bit T1 (low regs, word-aligned 7-bit offset), 16-bit T2
// (SP-relative), or 32-bit T3 with a 12-bit offset.
1027 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1029 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1030 ASSERT(imm.isUInt12());
1032 if (!((rt | rn) & 8) && imm.isUInt7())
1033 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1034 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1035 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1037 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
// Always uses the 32-bit T3 encoding so the instruction has a fixed width
// (the 8-bit immediate zero-extends into the 12-bit field).
1040 ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
1042 ASSERT(rn != ARMRegisters::pc);
1043 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
// Always uses the 16-bit T1 encoding; caller must guarantee it fits.
1046 ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1048 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1049 ASSERT(imm.isUInt7());
1050 ASSERT(!((rt | rn) & 8));
1051 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1054 // If index is set, this is a regular offset or a pre-indexed load;
1055 // if index is not set then it is a post-index load.
1057 // If wback is set rn is updated - this is a pre or post index load,
1058 // if wback is not set this is a regular offset memory access.
1060 // (-255 <= offset <= 255)
1062 // _tmp = _reg + offset
1063 // MEM[index ? _tmp : _reg] = REG[rt]
1064 // if (wback) REG[rn] = _tmp
// ldr (indexed), 32-bit T4 encoding: packs P/U/W flags and the 8-bit offset
// into the immediate field.
1065 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1067 ASSERT(rt != ARMRegisters::pc);
1068 ASSERT(rn != ARMRegisters::pc);
1069 ASSERT(index || wback);
// NOTE(review): bitwise | rather than || — equivalent here for bool operands.
1070 ASSERT(!wback | (rt != rn));
1077 ASSERT((offset & ~0xff) == 0);
// Bit 8 = writeback (W), bit 9 = add/subtract (U), bit 10 = index (P),
// bit 11 = 1 marks the T4 indexed form.
1079 offset |= (wback << 8);
1080 offset |= (add << 9);
1081 offset |= (index << 10);
1082 offset |= (1 << 11);
1084 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1087 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// ldr (register offset, optionally left-shifted): 16-bit T1 when possible,
// else 32-bit T2.
1088 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1090 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1091 ASSERT(!BadReg(rm));
1094 if (!shift && !((rt | rn | rm) & 8))
1095 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1097 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1100 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// ldrh (immediate): halfword load; offset must be 2-byte aligned. 16-bit T1
// when everything fits, else 32-bit T2.
1101 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1103 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1104 ASSERT(imm.isUInt12());
1105 ASSERT(!(imm.getUInt12() & 1));
1107 if (!((rt | rn) & 8) && imm.isUInt6())
1108 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
1110 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1113 // If index is set, this is a regular offset or a pre-indexed load;
1114 // if index is not set then it is a post-index load.
1116 // If wback is set rn is updated - this is a pre or post index load,
1117 // if wback is not set this is a regular offset memory access.
1119 // (-255 <= offset <= 255)
1121 // _tmp = _reg + offset
1122 // MEM[index ? _tmp : _reg] = REG[rt]
1123 // if (wback) REG[rn] = _tmp
// ldrh (indexed), 32-bit T3: same P/U/W flag packing as the ldr T4 form.
1124 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1126 ASSERT(rt != ARMRegisters::pc);
1127 ASSERT(rn != ARMRegisters::pc);
1128 ASSERT(index || wback);
// NOTE(review): bitwise | rather than || — equivalent here for bool operands.
1129 ASSERT(!wback | (rt != rn));
1136 ASSERT((offset & ~0xff) == 0);
1138 offset |= (wback << 8);
1139 offset |= (add << 9);
1140 offset |= (index << 10);
1141 offset |= (1 << 11);
1143 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
// ldrh (register offset): 16-bit T1 when possible, else 32-bit T2.
1146 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1148 ASSERT(!BadReg(rt)); // Memory hint
1149 ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1150 ASSERT(!BadReg(rm));
1153 if (!shift && !((rt | rn | rm) & 8))
1154 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1156 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// ldrb (immediate): byte load; 16-bit T1 for low regs with a 5-bit offset,
// else 32-bit T2 with a 12-bit offset.
1159 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1161 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1162 ASSERT(imm.isUInt12());
1164 if (!((rt | rn) & 8) && imm.isUInt5())
1165 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1167 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
// ldrb (indexed), 32-bit T3: same P/U/W flag packing as the ldr T4 form.
1170 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1172 ASSERT(rt != ARMRegisters::pc);
1173 ASSERT(rn != ARMRegisters::pc);
1174 ASSERT(index || wback);
// NOTE(review): bitwise | rather than || — equivalent here for bool operands.
1175 ASSERT(!wback | (rt != rn));
1183 ASSERT(!(offset & ~0xff));
1185 offset |= (wback << 8);
1186 offset |= (add << 9);
1187 offset |= (index << 10);
1188 offset |= (1 << 11);
1190 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
// ldrb (register offset): 16-bit T1 when possible, else 32-bit T2.
1193 ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1195 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1196 ASSERT(!BadReg(rm));
1199 if (!shift && !((rt | rn | rm) & 8))
1200 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1202 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// ldrsb: sign-extending byte load (register offset only here).
1205 void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1207 ASSERT(rn != ARMRegisters::pc);
1208 ASSERT(!BadReg(rm));
1211 if (!shift && !((rt | rn | rm) & 8))
1212 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
1214 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
// ldrsh: sign-extending halfword load (register offset only here).
1217 void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1219 ASSERT(rn != ARMRegisters::pc);
1220 ASSERT(!BadReg(rm));
1223 if (!shift && !((rt | rn | rm) & 8))
1224 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
1226 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Emits LSL (immediate, T1 32-bit encoding): rd = rm << shiftAmount.
1229 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1231 ASSERT(!BadReg(rd));
1232 ASSERT(!BadReg(rm));
1233 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1234 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits LSL (register, T2): rd = rn << rm.
1237 ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1239 ASSERT(!BadReg(rd));
1240 ASSERT(!BadReg(rn));
1241 ASSERT(!BadReg(rm));
1242 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Emits LSR (immediate, T1): rd = rm >> shiftAmount (logical).
1245 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1247 ASSERT(!BadReg(rd));
1248 ASSERT(!BadReg(rm));
1249 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1250 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits LSR (register, T2): rd = rn >> rm (logical).
1253 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1255 ASSERT(!BadReg(rd));
1256 ASSERT(!BadReg(rn));
1257 ASSERT(!BadReg(rm));
1258 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Emits MOVW (MOV immediate, T3): loads an arbitrary 16-bit immediate. Used as
// the first half of a movw/movt pair; callers must not pass a T2-encodable imm.
1261 ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1263 ASSERT(imm.isValid());
1264 ASSERT(!imm.isEncodedImm());
1265 ASSERT(!BadReg(rd));
1267 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
// Rewrites a previously emitted 5-halfword site in place with:
//   movw right, #lo16(imm); movt right, #hi16(imm); cmp left, right
// writing through performJITMemcpy and flushing the instruction cache.
1271 static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
1273 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1274 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
1275 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
1276 uint16_t instruction[] = {
1277 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
1278 twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
1279 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
1280 twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
1281 static_cast<uint16_t>(OP_CMP_reg_T2 | left)
1283 performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
1284 cacheFlush(address, sizeof(uint16_t) * 5);
// Rewrites a previously emitted 2-halfword site in place with a single
// movw rd, #imm (T3), then flushes the instruction cache.
1287 static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
1289 ASSERT(imm.isValid());
1290 ASSERT(!imm.isEncodedImm());
1291 ASSERT(!BadReg(rd));
1293 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1294 uint16_t instruction[] = {
1295 twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
1296 twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
1298 performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
1299 cacheFlush(address, sizeof(uint16_t) * 2);
// Emits MOV (immediate), choosing the shortest encoding: 16-bit T1 for a low
// destination with a uint8, else 32-bit T2 for a Thumb-encodable immediate.
1303 ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1305 ASSERT(imm.isValid());
1306 ASSERT(!BadReg(rd));
1308 if ((rd < 8) && imm.isUInt8())
1309 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1310 else if (imm.isEncodedImm())
1311 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
// Emits MOV (register, 16-bit T1); works with high registers.
1316 ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
1318 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
// Emits MOVT: writes imm16 to the top half of rd, leaving the bottom half intact.
1321 ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
1323 ASSERT(imm.isUInt16());
1324 ASSERT(!BadReg(rd));
1325 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
// Emits MVN (immediate): rd = ~imm.
1328 ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
1330 ASSERT(imm.isEncodedImm());
1331 ASSERT(!BadReg(rd));
1333 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
// Emits MVN (register, shifted, T2): rd = ~(rm shifted).
1336 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1338 ASSERT(!BadReg(rd));
1339 ASSERT(!BadReg(rm));
1340 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits MVN (register): 16-bit T1 for low registers, else the shifted T2 form.
1343 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1345 if (!((rd | rm) & 8))
1346 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1348 mvn(rd, rm, ShiftTypeAndAmount());
// Emits MRS: reads a special register into rd. Only APSR is supported here;
// the special-register selector bit is computed but asserted to be zero.
1351 ALWAYS_INLINE void mrs(RegisterID rd, SPRegisterID specReg)
1353 ASSERT(specReg == ARMRegisters::apsr);
1354 ASSERT(!BadReg(rd));
1355 unsigned short specialRegisterBit = (specReg == ARMRegisters::apsr) ? 0 : (1 << 4);
1356 OpcodeID1 mrsOp = static_cast<OpcodeID1>(OP_MRS_T1 | specialRegisterBit);
1357 m_formatter.twoWordOp16FourFours(mrsOp, FourFours(0x8, rd, 0, 0));
// Emits rd = -rm by materializing a zero immediate and subtracting rm from it.
1360 ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1362 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
// Emits ORR (immediate, T1): rd = rn | imm.
1366 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1368 ASSERT(!BadReg(rd));
1369 ASSERT(!BadReg(rn));
1370 ASSERT(imm.isEncodedImm());
1371 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
// Emits ORR (register, shifted, T2): rd = rn | (rm shifted).
1374 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1376 ASSERT(!BadReg(rd));
1377 ASSERT(!BadReg(rn));
1378 ASSERT(!BadReg(rm));
1379 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits ORR (register). ORR commutes, so the 16-bit T1 form is usable when the
// destination matches either operand and all registers are low.
1382 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1384 if ((rd == rn) && !((rd | rm) & 8))
1385 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1386 else if ((rd == rm) && !((rd | rn) & 8))
1387 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1389 orr(rd, rn, rm, ShiftTypeAndAmount());
// Emits flag-setting ORRS (register, shifted, T2).
1392 ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1394 ASSERT(!BadReg(rd));
1395 ASSERT(!BadReg(rn));
1396 ASSERT(!BadReg(rm));
1397 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits flag-setting ORRS (register). The 16-bit T1 encoding sets flags when
// outside an IT block, so it is reused here for the low-register cases.
1400 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1402 if ((rd == rn) && !((rd | rm) & 8))
1403 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1404 else if ((rd == rm) && !((rd | rn) & 8))
1405 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1407 orr_S(rd, rn, rm, ShiftTypeAndAmount());
// Emits ROR (immediate, T1): rd = rm rotated right by shiftAmount.
1410 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1412 ASSERT(!BadReg(rd));
1413 ASSERT(!BadReg(rm));
1414 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1415 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
// Emits ROR (register, T2): rd = rn rotated right by rm.
1418 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1420 ASSERT(!BadReg(rd));
1421 ASSERT(!BadReg(rn));
1422 ASSERT(!BadReg(rm));
1423 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
// Pops a single register: 16-bit POP for low registers, otherwise a
// post-indexed LDR from SP with writeback (which handles high registers).
1426 ALWAYS_INLINE void pop(RegisterID dest)
1428 if (dest < ARMRegisters::r8)
1429 m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
1431 // Load postindexed with writeback.
1432 ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
// Pops a register list (32-bit T2 encoding). PC and LR may not both be in the
// list, and SP may never be.
1436 ALWAYS_INLINE void pop(uint32_t registerList)
1438 ASSERT(WTF::bitCount(registerList) > 1);
1439 ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
1440 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1441 m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
// Pushes a single register: 16-bit PUSH for low registers and for LR (bit 8 of
// the T1 register list), otherwise a pre-indexed STR to SP with writeback.
1444 ALWAYS_INLINE void push(RegisterID src)
1446 if (src < ARMRegisters::r8)
1447 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
1448 else if (src == ARMRegisters::lr)
1449 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
1451 // Store preindexed with writeback.
1452 str(src, ARMRegisters::sp, -sizeof(void*), true, true);
// Pushes a register list (32-bit T2 encoding); PC and SP are not permitted.
1456 ALWAYS_INLINE void push(uint32_t registerList)
1458 ASSERT(WTF::bitCount(registerList) > 1);
1459 ASSERT(!((1 << ARMRegisters::pc) & registerList));
1460 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1461 m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
1464 #if HAVE(ARM_IDIV_INSTRUCTIONS)
// Emits SDIV (signed divide, T1). Only 32-bit operation is supported; the
// template parameter exists for interface parity with other back ends.
1465 template<int datasize>
1466 ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
1468 static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
1469 ASSERT(!BadReg(rd));
1470 ASSERT(!BadReg(rn));
1471 ASSERT(!BadReg(rm));
1472 m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
// Emits SMULL: 64-bit signed multiply, result split across rdLo/rdHi.
1476 ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1478 ASSERT(!BadReg(rdLo));
1479 ASSERT(!BadReg(rdHi));
1480 ASSERT(!BadReg(rn));
1481 ASSERT(!BadReg(rm));
1482 ASSERT(rdLo != rdHi);
1483 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1486 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STR (immediate), choosing the shortest encoding: T1 (low regs, word-
// scaled uint7), T2 (SP-relative, word-scaled uint10), else 32-bit T3 (imm12).
1487 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1489 ASSERT(rt != ARMRegisters::pc);
1490 ASSERT(rn != ARMRegisters::pc);
1491 ASSERT(imm.isUInt12());
1493 if (!((rt | rn) & 8) && imm.isUInt7())
1494 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1495 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1496 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1498 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1501 // If index is set, this is a regular offset or a pre-indexed store;
1502 // if index is not set then this is a post-index store.
1504 // If wback is set rn is updated - this is a pre or post index store,
1505 // if wback is not set this is a regular offset memory access.
1507 // (-255 <= offset <= 255)
1509 // _tmp = _reg + offset
1510 // MEM[index ? _tmp : _reg] = REG[rt]
1511 // if (wback) REG[rn] = _tmp
// Emits STR (immediate, T4) with pre/post-indexing; control-bit layout:
// bit 8 = W (writeback), bit 9 = U (add), bit 10 = P (index), bit 11 fixed to 1.
1512 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1514 ASSERT(rt != ARMRegisters::pc);
1515 ASSERT(rn != ARMRegisters::pc);
1516 ASSERT(index || wback);
// NOTE(review): bitwise `|` where `||` is conventional (harmless on bools).
1517 ASSERT(!wback | (rt != rn));
1524 ASSERT((offset & ~0xff) == 0);
1526 offset |= (wback << 8);
1527 offset |= (add << 9);
1528 offset |= (index << 10);
1529 offset |= (1 << 11);
1531 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1534 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STR (register): 16-bit T1 when possible, else 32-bit T2 with a shifted index.
1535 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1537 ASSERT(rn != ARMRegisters::pc);
1538 ASSERT(!BadReg(rm));
1541 if (!shift && !((rt | rn | rm) & 8))
1542 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1544 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1547 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STRB (immediate): 16-bit T1 for low registers, else 32-bit T2 (imm12).
1548 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1550 ASSERT(rt != ARMRegisters::pc);
1551 ASSERT(rn != ARMRegisters::pc);
1552 ASSERT(imm.isUInt12());
// NOTE(review): STRB's T1 encoding takes an *unscaled* imm5 (byte offsets 0-31;
// compare ldrb above, which uses isUInt5()/getUInt5()). The isUInt7()/>>2
// word-scaling here matches the STR (word) pattern and looks like a copy-paste
// slip — verify against the full source that offsets that are not multiples of
// 4 cannot reach this path.
1554 if (!((rt | rn) & 8) && imm.isUInt7())
1555 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1557 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1560 // If index is set, this is a regular offset or a pre-indexed store;
1561 // if index is not set then this is a post-index store.
1563 // If wback is set rn is updated - this is a pre or post index store,
1564 // if wback is not set this is a regular offset memory access.
1566 // (-255 <= offset <= 255)
1568 // _tmp = _reg + offset
1569 // MEM[index ? _tmp : _reg] = REG[rt]
1570 // if (wback) REG[rn] = _tmp
// Emits STRB (immediate, T3) with pre/post-indexing; control-bit layout:
// bit 8 = W, bit 9 = U, bit 10 = P, bit 11 fixed to 1.
1571 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1573 ASSERT(rt != ARMRegisters::pc);
1574 ASSERT(rn != ARMRegisters::pc);
1575 ASSERT(index || wback);
// NOTE(review): bitwise `|` where `||` is conventional (harmless on bools).
1576 ASSERT(!wback | (rt != rn));
1583 ASSERT((offset & ~0xff) == 0);
1585 offset |= (wback << 8);
1586 offset |= (add << 9);
1587 offset |= (index << 10);
1588 offset |= (1 << 11);
1590 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1593 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STRB (register): 16-bit T1 when possible, else 32-bit T2.
1594 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1596 ASSERT(rn != ARMRegisters::pc);
1597 ASSERT(!BadReg(rm));
1600 if (!shift && !((rt | rn | rm) & 8))
1601 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
1603 m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1606 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STRH (immediate): 16-bit T1 for low registers with a halfword-scaled
// uint6 offset, else 32-bit T2 (imm12).
1607 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1609 ASSERT(rt != ARMRegisters::pc);
1610 ASSERT(rn != ARMRegisters::pc);
1611 ASSERT(imm.isUInt12());
1613 if (!((rt | rn) & 8) && imm.isUInt6())
1614 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
1616 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
1619 // If index is set, this is a regular offset or a pre-indexed store;
1620 // if index is not set then this is a post-index store.
1622 // If wback is set rn is updated - this is a pre or post index store,
1623 // if wback is not set this is a regular offset memory access.
1625 // (-255 <= offset <= 255)
1627 // _tmp = _reg + offset
1628 // MEM[index ? _tmp : _reg] = REG[rt]
1629 // if (wback) REG[rn] = _tmp
// Emits STRH (immediate, T3) with pre/post-indexing; same control-bit layout.
1630 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1632 ASSERT(rt != ARMRegisters::pc);
1633 ASSERT(rn != ARMRegisters::pc);
1634 ASSERT(index || wback);
// NOTE(review): bitwise `|` where `||` is conventional (harmless on bools).
1635 ASSERT(!wback | (rt != rn));
1642 ASSERT(!(offset & ~0xff));
1644 offset |= (wback << 8);
1645 offset |= (add << 9);
1646 offset |= (index << 10);
1647 offset |= (1 << 11);
1649 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1652 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
// Emits STRH (register): 16-bit T1 when possible, else 32-bit T2.
1653 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1655 ASSERT(rn != ARMRegisters::pc);
1656 ASSERT(!BadReg(rm));
1659 if (!shift && !((rt | rn | rm) & 8))
1660 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
1662 m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
// Emits SUB (immediate), choosing the shortest encoding:
// SP-adjust T1, low-register T1/T2, Thumb-encodable T3, else imm12 T4.
1665 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1667 // Rd can only be SP if Rn is also SP.
1668 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1669 ASSERT(rd != ARMRegisters::pc);
1670 ASSERT(rn != ARMRegisters::pc);
1671 ASSERT(imm.isValid());
// SP adjustment uses the dedicated word-scaled 16-bit encoding.
1673 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1674 ASSERT(!(imm.getUInt16() & 3));
1675 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1677 } else if (!((rd | rn) & 8)) {
1678 if (imm.isUInt3()) {
1679 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1681 } else if ((rd == rn) && imm.isUInt8()) {
1682 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1687 if (imm.isEncodedImm())
1688 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1690 ASSERT(imm.isUInt12());
1691 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
// Emits RSB (reverse subtract, immediate): rd = imm - rn; 16-bit T1 NEG form
// when the immediate is zero and both registers are low.
1695 ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1697 ASSERT(rd != ARMRegisters::pc);
1698 ASSERT(rn != ARMRegisters::pc);
1699 ASSERT(imm.isValid());
1700 ASSERT(imm.isUInt12());
1702 if (!((rd | rn) & 8) && !imm.getUInt12())
1703 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1705 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
// Emits SUB (register, shifted, T2): rd = rn - (rm shifted).
1708 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1710 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1711 ASSERT(rd != ARMRegisters::pc);
1712 ASSERT(rn != ARMRegisters::pc);
1713 ASSERT(!BadReg(rm));
1714 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1717 // NOTE: In an IT block, add doesn't modify the flags register.
// Emits SUB (register): 16-bit T1 for low registers, else the shifted T2 form.
1718 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1720 if (!((rd | rn | rm) & 8))
1721 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1723 sub(rd, rn, rm, ShiftTypeAndAmount());
1726 // Not allowed in an IT (if then) block.
// Emits flag-setting SUBS (immediate); encoding selection mirrors sub() above,
// except the 32-bit fallback is always the flag-setting T3 form.
1727 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1729 // Rd can only be SP if Rn is also SP.
1730 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1731 ASSERT(rd != ARMRegisters::pc);
1732 ASSERT(rn != ARMRegisters::pc);
1733 ASSERT(imm.isValid());
1735 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1736 ASSERT(!(imm.getUInt16() & 3));
1737 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1739 } else if (!((rd | rn) & 8)) {
1740 if (imm.isUInt3()) {
1741 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1743 } else if ((rd == rn) && imm.isUInt8()) {
1744 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1749 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
// Emits flag-setting RSBS (reverse subtract, immediate): rd = imm - rn.
1752 ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1754 ASSERT(rd != ARMRegisters::pc);
1755 ASSERT(rn != ARMRegisters::pc);
1756 ASSERT(imm.isValid());
1757 ASSERT(imm.isUInt12());
1759 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
1762 // Not allowed in an IT (if then) block?
// Emits flag-setting SUBS (register, shifted, T2).
1763 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1765 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1766 ASSERT(rd != ARMRegisters::pc);
1767 ASSERT(rn != ARMRegisters::pc);
1768 ASSERT(!BadReg(rm));
1769 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1772 // Not allowed in an IT (if then) block.
// Emits flag-setting SUBS (register): 16-bit T1 for low registers (sets flags
// outside an IT block), else the shifted T2 form.
1773 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1775 if (!((rd | rn | rm) & 8))
1776 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1778 sub_S(rd, rn, rm, ShiftTypeAndAmount());
// Emits TST (immediate): sets flags for rn & imm; no destination register.
1781 ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
1783 ASSERT(!BadReg(rn));
1784 ASSERT(imm.isEncodedImm());
1786 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
// Emits TST (register, shifted, T2): sets flags for rn & (rm shifted).
1789 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1791 ASSERT(!BadReg(rn));
1792 ASSERT(!BadReg(rm));
1793 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
// Emits TST (register): shifted T2 form for high registers, 16-bit T1 otherwise.
1796 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1799 tst(rn, rm, ShiftTypeAndAmount());
1801 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
// Emits UBFX (unsigned bit-field extract, T1): rd = rn<lsb + width - 1 : lsb>.
// The lsb is split across the imm3/imm2 fields; width is encoded as width - 1.
1804 ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
1807 ASSERT((width >= 1) && (width <= 32));
1808 ASSERT((lsb + width) <= 32);
1809 m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
1812 #if HAVE(ARM_IDIV_INSTRUCTIONS)
// Emits UDIV (unsigned divide, T1).
1813 ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
1815 ASSERT(!BadReg(rd));
1816 ASSERT(!BadReg(rn));
1817 ASSERT(!BadReg(rm));
1818 m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
// VFP double-precision arithmetic. The `true`/`false` argument to vfpOp selects
// double vs. single precision register encoding.
1822 void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1824 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
// Compares rd with rm, setting the FPSCR flags (read back via vmrs).
1827 void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1829 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
// Compares rd with +0.0.
1832 void vcmpz(FPDoubleRegisterID rd)
1834 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
// Conversions between integer (held in a single-precision register) and double.
1837 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1839 // boolean values are 64bit (toInt, unsigned, roundZero)
1840 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
1843 void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1845 // boolean values are 64bit (toInt, unsigned, roundZero)
1846 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
1849 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1851 // boolean values are 64bit (toInt, unsigned, roundZero)
1852 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
1855 void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1857 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
// Double-precision load/store with an immediate offset from rn.
1860 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1862 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
// Single-precision load (legacy FLDS mnemonic).
1865 void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1867 m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
// Moves between core and VFP registers (single, double-as-pair, double copy).
1870 void vmov(RegisterID rd, FPSingleRegisterID rn)
1872 ASSERT(!BadReg(rd));
1873 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
1876 void vmov(FPSingleRegisterID rd, RegisterID rn)
1878 ASSERT(!BadReg(rn));
1879 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
1882 void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
1884 ASSERT(!BadReg(rd1));
1885 ASSERT(!BadReg(rd2));
1886 m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
1889 void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
1891 ASSERT(!BadReg(rn1));
1892 ASSERT(!BadReg(rn2));
1893 m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
1896 void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
1898 m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
// Reads FPSCR flags; the default reg == pc form (APSR_nzcv) transfers the
// floating-point flags directly into the APSR condition flags.
1901 void vmrs(RegisterID reg = ARMRegisters::pc)
1903 ASSERT(reg != ARMRegisters::sp);
1904 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
1907 void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1909 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
1912 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1914 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
// Single-precision store (legacy FSTS mnemonic).
1917 void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1919 m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
1922 void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1924 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
1927 void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1929 m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
1932 void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1934 m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
1937 void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1939 m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
// Precision conversions: single -> double and double -> single.
1942 void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1944 m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
1947 void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1949 m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
// Emits the 16-bit Thumb NOP (T1 encoding).
1954 m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
// Emits the 32-bit Thumb-2 NOP (T2 encoding).
1959 m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
// Raw NOP bit patterns, for writing directly into a buffer (see fillNops).
1962 static constexpr int16_t nopPseudo16()
1967 static constexpr int32_t nopPseudo32()
1969 return OP_NOP_T2a | (OP_NOP_T2b << 16);
// Signature of the memcpy-like primitive used to write into (possibly
// JIT-protected) code memory.
1972 using CopyFunction = void*(&)(void*, const void*, size_t);
// Fills [base, base + size) with 32-bit NOPs, plus one trailing 16-bit NOP when
// size is not a multiple of 4. size must be a multiple of 2 (Thumb halfwords).
1974 template <CopyFunction copy>
1975 static void fillNops(void* base, size_t size)
1977 RELEASE_ASSERT(!(size % sizeof(int16_t)));
1979 char* ptr = static_cast<char*>(base);
1980 const size_t num32s = size / sizeof(int32_t);
1981 for (size_t i = 0; i < num32s; i++) {
1982 const int32_t insn = nopPseudo32();
1983 copy(ptr, &insn, sizeof(int32_t));
1984 ptr += sizeof(int32_t);
1987 const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
1988 ASSERT(num16s == 0 || num16s == 1);
1989 ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
1991 const int16_t insn = nopPseudo16();
1992 copy(ptr, &insn, sizeof(int16_t));
// Emits DMB SY (full-system data memory barrier).
1998 m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b);
// Emits DMB ISHST (inner-shareable, store-store barrier).
2003 m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b);
// Returns the current buffer position without any watchpoint padding.
2006 AssemblerLabel labelIgnoringWatchpoints()
2008 return m_formatter.label();
// Returns a label for a watchpoint site and records its position plus the
// space needed for the largest jump that may later be patched over it.
2011 AssemblerLabel labelForWatchpoint()
2013 AssemblerLabel result = m_formatter.label();
2014 if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
2016 m_indexOfLastWatchpoint = result.m_offset;
2017 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
// Returns a label, first padding with NOPs past the tail of the most recent
// watchpoint so later patching cannot overwrite the labelled instruction.
2021 AssemblerLabel label()
2023 AssemblerLabel result = m_formatter.label();
2024 while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
2025 if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
2029 result = m_formatter.label();
// Pads the buffer until it is aligned to `alignment` bytes, then returns a label.
2034 AssemblerLabel align(int alignment)
2036 while (!m_formatter.isAligned(alignment))
// Translates a buffer-relative label into an address inside relocated code.
2042 static void* getRelocatedAddress(void* code, AssemblerLabel label)
2044 ASSERT(label.isSet());
2045 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
2048 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
2050 return b.m_offset - a.m_offset;
// Bytes saved by shrinking a jump of `jumpType` down to `jumpLinkType`.
2053 static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
2055 // Assembler admin methods:
// Orders link records by their source offset, for branch-compaction passes.
2057 static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
2059 return a.from() < b.from();
// Whether a jump of this type may be shrunk during branch compaction.
2062 static bool canCompact(JumpType jumpType)
2064 // The following cannot be compacted:
2065 // JumpFixed: represents custom jump sequence
2066 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2067 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2068 return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
// Picks the smallest link encoding that can span from `from` to `to`,
// trying T1/T3/conditional-T4 for conditional jumps, then T2/T4, and finally
// falling back to the long (BX-based) sequence.
2071 static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
2073 if (jumpType == JumpFixed)
2076 // for patchable jump we must leave space for the longest code sequence
2077 if (jumpType == JumpNoConditionFixedSize)
2079 if (jumpType == JumpConditionFixedSize)
2080 return LinkConditionalBX;
2082 const int paddingSize = JUMP_ENUM_SIZE(jumpType);
2084 if (jumpType == JumpCondition) {
2085 // 2-byte conditional T1
2086 const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
2087 if (canBeJumpT1(jumpT1Location, to))
2089 // 4-byte conditional T3
2090 const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
2091 if (canBeJumpT3(jumpT3Location, to))
2093 // 4-byte conditional T4 with IT
2094 const uint16_t* conditionalJumpT4Location =
2095 reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
2096 if (canBeJumpT4(conditionalJumpT4Location, to))
2097 return LinkConditionalJumpT4;
2099 // 2-byte unconditional T2
2100 const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
2101 if (canBeJumpT2(jumpT2Location, to))
2103 // 4-byte unconditional T4
2104 const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
2105 if (canBeJumpT4(jumpT4Location, to))
2107 // use long jump sequence
2111 ASSERT(jumpType == JumpCondition);
2112 return LinkConditionalBX;
// Convenience overload: computes the link type and caches it on the record.
2115 static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
2117 JumpLinkType linkType = computeJumpType(record.type(), from, to);
2118 record.setLinkType(linkType);
// Returns the pending jumps, sorted by source offset for the linker.
2122 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
2124 std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
2125 return m_jumpsToLink;
// Writes the final branch encoding for a link record, dispatching on the link
// type chosen by computeJumpType(). `from` is the writable address, while
// `fromInstruction8` is the executable address used for PC-relative offsets.
2128 template<CopyFunction copy>
2129 static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
2131 const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
2132 switch (record.linkType()) {
2134 linkJumpT1<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2137 linkJumpT2<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2140 linkJumpT3<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2143 linkJumpT4<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2145 case LinkConditionalJumpT4:
2146 linkConditionalJumpT4<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2148 case LinkConditionalBX:
2149 linkConditionalBX<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2152 linkBX<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
2155 RELEASE_ASSERT_NOT_REACHED();
2160 size_t codeSize() const { return m_formatter.codeSize(); }
// Offset of the instruction following a call, i.e. its return address.
2162 static unsigned getCallReturnOffset(AssemblerLabel call)
2164 ASSERT(call.isSet());
2165 return call.m_offset;
2168 // Linking & patching:
2170 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2171 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2172 // code has been finalized it is (platform support permitting) within a non-
2173 // writable region of memory; to modify the code in an execute-only executable
2174 // pool the 'repatch' and 'relink' methods should be used.
// Records a jump to be resolved later by link(); nothing is written yet.
2176 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2179 ASSERT(from.isSet());
2180 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
// Resolves a jump in still-writable code to an absolute target.
2183 static void linkJump(void* code, AssemblerLabel from, void* to)
2185 ASSERT(from.isSet());
2187 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2188 linkJumpAbsolute(location, location, to);
// Resolves a call site by rewriting the movw/movt pair just before it.
2191 static void linkCall(void* code, AssemblerLabel from, void* to)
2193 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2194 ASSERT(from.isSet());
2196 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
// Writes a pointer constant at a labelled location in still-writable code.
2199 static void linkPointer(void* code, AssemblerLabel where, void* value)
2201 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
2204 // The static relink and replace methods can use |from| for both
2205 // the write and executable address for call and jump patching
2206 // as they're modifying existing (linked) code, so the address being
2207 // provided is correct for relative address computation.
2208 static void relinkJump(void* from, void* to)
2210 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2211 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
2213 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);
// Flush the 5 halfwords the absolute-jump sequence may occupy.
2215 cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
// A jump to the next instruction is effectively a no-op.
2218 static void relinkJumpToNop(void* from)
2220 relinkJump(from, from);
2223 static void relinkCall(void* from, void* to)
2225 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2227 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
2230 static void* readCallTarget(void* from)
2232 return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
2235 static void repatchInt32(void* where, int32_t value)
2237 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2239 setInt32(where, value, true);
// Patches the 12-bit offset field of a compact load at `where`, rebuilding
// the imm field as imm8 | U<<9 | P(1)<<10 | fixed 1<<11 (no writeback).
2242 static void repatchCompact(void* where, int32_t offset)
2244 ASSERT(offset >= -255 && offset <= 255);
2252 offset |= (add << 9);
2253 offset |= (1 << 10);
2254 offset |= (1 << 11);
2256 uint16_t* location = reinterpret_cast<uint16_t*>(where);
// Keep the opcode bits of the second halfword; replace only the low 12 bits.
2257 uint16_t instruction = location[1] & ~((1 << 12) - 1);
2258 instruction |= offset;
2259 performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
2260 cacheFlush(location, sizeof(uint16_t) * 2);
2263 static void repatchPointer(void* where, void* value)
2265 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2267 setPointer(where, value, true);
2270 static void* readPointer(void* where)
2272 return reinterpret_cast<void*>(readInt32(where));
2275 static void replaceWithJump(void* instructionStart, void* to)
2277 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2278 ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2281 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
2282 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2283 linkJumpT4(ptr, ptr, to);
2284 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2286 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
2287 linkBX(ptr, ptr, to);
2288 cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
2291 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2292 linkJumpT4(ptr, ptr, to);
2293 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2297 static ptrdiff_t maxJumpReplacementSize()
2306 static constexpr ptrdiff_t patchableJumpSize()
2311 static void replaceWithLoad(void* instructionStart)
2313 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2314 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2315 switch (ptr[0] & 0xFFF0) {
2318 case OP_ADD_imm_T3: {
2319 ASSERT(!(ptr[1] & 0xF000));
2320 uint16_t instructions[2];
2321 instructions[0] = ptr[0] & 0x000F;
2322 instructions[0] |= OP_LDR_imm_T3;
2323 instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
2324 instructions[1] &= 0xF0FF;
2325 performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
2326 cacheFlush(ptr, sizeof(uint16_t) * 2);
2330 RELEASE_ASSERT_NOT_REACHED();
2334 static void replaceWithAddressComputation(void* instructionStart)
2336 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2337 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2338 switch (ptr[0] & 0xFFF0) {
2339 case OP_LDR_imm_T3: {
2340 ASSERT(!(ptr[1] & 0x0F00));
2341 uint16_t instructions[2];
2342 instructions[0] = ptr[0] & 0x000F;
2343 instructions[0] |= OP_ADD_imm_T3;
2344 instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
2345 instructions[1] &= 0x0FFF;
2346 performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
2347 cacheFlush(ptr, sizeof(uint16_t) * 2);
2353 RELEASE_ASSERT_NOT_REACHED();
2357 unsigned debugOffset() { return m_formatter.debugOffset(); }
// Flush the instruction cache for [begin, end) on Linux/ARM.
// NOTE(review): the inline-asm instruction template (original lines
// 2361-2371) is not visible in this excerpt; only the operand and clobber
// lists remain. Presumably it issues the ARM cacheflush syscall - confirm
// against the full file before modifying.
2360 static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
2372 : "r" (begin), "r" (end)
2373 : "r0", "r1", "r2");
// Synchronize the instruction cache with freshly written code in
// [code, code + size). NOTE(review): the platform #if/#elif/#else lines were
// stripped from this excerpt; the three fragments below are the per-platform
// branches - confirm the exact OS() macros against the full file.
2377 static void cacheFlush(void* code, size_t size)
// Darwin branch: a single libc call covers the whole range.
2380 sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
// Linux branch: flush page by page via linuxPageFlush().
2382 size_t page = pageSize();
2383 uintptr_t current = reinterpret_cast<uintptr_t>(code);
2384 uintptr_t end = current + size;
// End of the page containing 'current' (page is a power of two).
2385 uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
// Whole range within one page: one flush suffices.
2387 if (end <= firstPageEnd) {
2388 linuxPageFlush(current, end);
// Otherwise: leading partial page, then whole pages, then the tail.
2392 linuxPageFlush(current, firstPageEnd);
2394 for (current = firstPageEnd; current + page < end; current += page)
2395 linuxPageFlush(current, current + page);
2397 linuxPageFlush(current, end);
// Fallback branch: unsupported platform.
2399 #error "The cacheFlush support is missing on this platform."
// VFP operations commonly take one or more 5-bit operands, typically representing a
// floating point register number. This will commonly be encoded in the instruction
// in two parts, with one single bit field, and one 4-bit field. In the case of
// double precision operands the high bit of the register number is the bit encoded
// separately; in the case of single precision operands it is the low bit of the
// register number that is encoded separately.
// VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
// field to be encoded together in the instruction (the low 4-bits of a double
// register number, or the high 4-bits of a single register number), and bit 4
// contains the bit value to be encoded individually.
2415 explicit VFPOperand(uint32_t value)
2418 ASSERT(!(m_value & ~0x1f));
2421 VFPOperand(FPDoubleRegisterID reg)
2426 VFPOperand(RegisterID reg)
2431 VFPOperand(FPSingleRegisterID reg)
2432 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
2438 return m_value >> 4;
2443 return m_value & 0xf;
2449 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
2451 // Cannot specify rounding when converting to float.
2452 ASSERT(toInteger || !isRoundZero);
2456 // opc2 indicates both toInteger & isUnsigned.
2457 op |= isUnsigned ? 0x4 : 0x5;
2458 // 'op' field in instruction is isRoundZero
2462 ASSERT(!isRoundZero);
2463 // 'op' field in instruction is isUnsigned
2467 return VFPOperand(op);
2470 static void setInt32(void* code, uint32_t value, bool flush)
2472 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2473 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2475 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
2476 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
2477 uint16_t instructions[4];
2478 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2479 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
2480 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2481 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
2483 performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
2485 cacheFlush(location - 4, 4 * sizeof(uint16_t));
2488 static int32_t readInt32(void* code)
2490 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2491 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2493 ARMThumbImmediate lo16;
2494 ARMThumbImmediate hi16;
2495 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
2496 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
2497 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
2498 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
2499 uint32_t result = hi16.asUInt16();
2501 result |= lo16.asUInt16();
2502 return static_cast<int32_t>(result);
2505 static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
2507 // Requires us to have planted a LDR_imm_T1
2508 ASSERT(imm.isValid());
2509 ASSERT(imm.isUInt7());
2510 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2511 uint16_t instruction;
2512 instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2513 instruction |= (imm.getUInt7() >> 2) << 6;
2514 performJITMemcpy(location, &instruction, sizeof(uint16_t));
2515 cacheFlush(location, sizeof(uint16_t));
2518 static void setPointer(void* code, void* value, bool flush)
2520 setInt32(code, reinterpret_cast<uint32_t>(value), flush);
2523 static bool isB(const void* address)
2525 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2526 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
2529 static bool isBX(const void* address)
2531 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2532 return (instruction[0] & 0xff87) == OP_BX;
2535 static bool isMOV_imm_T3(const void* address)
2537 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2538 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
2541 static bool isMOVT(const void* address)
2543 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2544 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
2547 static bool isNOP_T1(const void* address)
2549 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2550 return instruction[0] == OP_NOP_T1;
2553 static bool isNOP_T2(const void* address)
2555 const uint16_t* instruction = static_cast<const uint16_t*>(address);
2556 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
2559 static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2561 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2562 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2564 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2565 // It does not appear to be documented in the ARM ARM (big surprise), but
2566 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2567 // less than the actual displacement.
2569 return ((relative << 23) >> 23) == relative;
2572 static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2574 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2575 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2577 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2578 // It does not appear to be documented in the ARM ARM (big surprise), but
2579 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2580 // less than the actual displacement.
2582 return ((relative << 20) >> 20) == relative;
2585 static bool canBeJumpT3(const uint16_t* instruction, const void* target)
2587 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2588 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2590 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2591 return ((relative << 11) >> 11) == relative;
2594 static bool canBeJumpT4(const uint16_t* instruction, const void* target)
2596 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2597 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2599 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2600 return ((relative << 7) >> 7) == relative;
2603 template<CopyFunction copy = performJITMemcpy>
2604 static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2606 // FIMXE: this should be up in the MacroAssembler layer. :-(
2607 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2608 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2609 ASSERT(canBeJumpT1(instruction, target));
2611 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2612 // It does not appear to be documented in the ARM ARM (big surprise), but
2613 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2614 // less than the actual displacement.
2617 // All branch offsets should be an even distance.
2618 ASSERT(!(relative & 1));
2619 uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2620 copy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
2623 template<CopyFunction copy = performJITMemcpy>
2624 static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2626 // FIMXE: this should be up in the MacroAssembler layer. :-(
2627 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2628 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2629 ASSERT(canBeJumpT2(instruction, target));
2631 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2632 // It does not appear to be documented in the ARM ARM (big surprise), but
2633 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2634 // less than the actual displacement.
2637 // All branch offsets should be an even distance.
2638 ASSERT(!(relative & 1));
2639 uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
2640 copy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
2643 template<CopyFunction copy = performJITMemcpy>
2644 static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2646 // FIMXE: this should be up in the MacroAssembler layer. :-(
2647 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2648 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2649 ASSERT(canBeJumpT3(instruction, target));
2651 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2653 // All branch offsets should be an even distance.
2654 ASSERT(!(relative & 1));
2655 uint16_t instructions[2];
2656 instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2657 instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2658 copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
2661 template<CopyFunction copy = performJITMemcpy>
2662 static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2664 // FIMXE: this should be up in the MacroAssembler layer. :-(
2665 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2666 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2667 ASSERT(canBeJumpT4(instruction, target));
2669 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2670 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2672 relative ^= 0xC00000;
2674 // All branch offsets should be an even distance.
2675 ASSERT(!(relative & 1));
2676 uint16_t instructions[2];
2677 instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2678 instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2679 copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
2682 template<CopyFunction copy = performJITMemcpy>
2683 static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2685 // FIMXE: this should be up in the MacroAssembler layer. :-(
2686 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2687 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2689 uint16_t newInstruction = ifThenElse(cond) | OP_IT;
2690 copy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
2691 linkJumpT4<copy>(writeTarget, instruction, target);
2694 template<CopyFunction copy = performJITMemcpy>
2695 static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2697 // FIMXE: this should be up in the MacroAssembler layer. :-(
2698 ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
2699 ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
2700 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2702 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2703 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2704 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2705 uint16_t instructions[5];
2706 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2707 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2708 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2709 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2710 instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2712 copy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
2715 template<CopyFunction copy = performJITMemcpy>
2716 static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2718 // FIMXE: this should be up in the MacroAssembler layer. :-(
2719 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2720 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2722 linkBX(writeTarget, instruction, target);
2723 uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
2724 copy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
2727 static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
2729 // FIMXE: this should be up in the MacroAssembler layer. :-(
2730 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2731 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2733 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2734 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2736 if (canBeJumpT4(instruction, target)) {
2737 // There may be a better way to fix this, but right now put the NOPs first, since in the
2738 // case of an conditional branch this will be coming after an ITTT predicating *three*
2739 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2740 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2741 // actually be the second half of a 2-word op.
2742 uint16_t instructions[3];
2743 instructions[0] = OP_NOP_T1;
2744 instructions[1] = OP_NOP_T2a;
2745 instructions[2] = OP_NOP_T2b;
2746 performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
2747 linkJumpT4(writeTarget, instruction, target);
2749 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2750 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2751 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2753 uint16_t instructions[5];
2754 instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2755 instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2756 instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2757 instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2758 instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2759 performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
2763 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2765 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2768 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2770 result.m_value.i = (value >> 10) & 1;
2771 result.m_value.imm4 = value & 15;
2774 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2776 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2779 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2781 result.m_value.imm3 = (value >> 12) & 7;
2782 result.m_value.imm8 = value & 255;
2785 class ARMInstructionFormatter {
2787 ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2789 m_buffer.putShort(op | (rd << 8) | imm);
2792 ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2794 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2797 ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2799 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2802 ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
2804 m_buffer.putShort(op | imm);
2807 ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2809 m_buffer.putShort(op | imm);
2812 ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2814 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2817 ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2819 m_buffer.putShort(op | imm);
2822 ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2824 m_buffer.putShort(op | (reg1 << 3) | reg2);
2827 ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2829 m_buffer.putShort(op | reg);
2830 m_buffer.putShort(ff.m_u.value);
2833 ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2835 m_buffer.putShort(op);
2836 m_buffer.putShort(ff.m_u.value);
2839 ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2841 m_buffer.putShort(op1);
2842 m_buffer.putShort(op2);
2845 ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
2847 m_buffer.putShort(op1);
2848 m_buffer.putShort(imm);
2851 ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2853 ARMThumbImmediate newImm = imm;
2854 newImm.m_value.imm4 = imm4;
2856 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2857 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2860 ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2862 m_buffer.putShort(op | reg1);
2863 m_buffer.putShort((reg2 << 12) | imm);
2866 ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
2868 m_buffer.putShort(op | reg1);
2869 m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
2872 // Formats up instructions of the pattern:
2873 // 111111111B11aaaa:bbbb222SA2C2cccc
2874 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2875 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2876 ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2878 ASSERT(!(op1 & 0x004f));
2879 ASSERT(!(op2 & 0xf1af));
2880 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2881 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2884 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2885 // (i.e. +/-(0..255) 32-bit words)
2886 ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2894 uint32_t offset = imm;
2895 ASSERT(!(offset & ~0x3fc));
2898 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2899 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2902 // Administrative methods:
2904 size_t codeSize() const { return m_buffer.codeSize(); }
2905 AssemblerLabel label() const { return m_buffer.label(); }
2906 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2907 void* data() const { return m_buffer.data(); }
2909 unsigned debugOffset() { return m_buffer.debugOffset(); }
2911 AssemblerBuffer m_buffer;
// Jumps recorded by linkJump(AssemblerLabel, AssemblerLabel, ...) awaiting
// final encoding when the code is copied out.
2914 Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
// Watchpoint bookkeeping - NOTE(review): maintained outside this excerpt;
// presumably label offsets of the most recent watchpoint range - confirm.
2915 int m_indexOfLastWatchpoint;
2916 int m_indexOfTailOfLastWatchpoint;
2921 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)