2 * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef MacroAssemblerARM64_h
27 #define MacroAssemblerARM64_h
#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <utility>
#include <wtf/MathExtras.h>
#include <wtf/Optional.h>
38 class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
40 static const unsigned numGPRs = 32;
41 static const unsigned numFPRs = 32;
43 static const RegisterID dataTempRegister = ARM64Registers::ip0;
44 static const RegisterID memoryTempRegister = ARM64Registers::ip1;
46 RegisterID scratchRegister()
48 RELEASE_ASSERT(m_allowScratchRegister);
49 return getCachedDataTempRegisterIDAndInvalidate();
53 static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
54 static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
55 static const intptr_t maskHalfWord0 = 0xffffl;
56 static const intptr_t maskHalfWord1 = 0xffff0000l;
57 static const intptr_t maskUpperWord = 0xffffffff00000000l;
59 // 4 instructions - 3 to load the function pointer, + blr.
60 static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
64 : m_dataMemoryTempRegister(this, dataTempRegister)
65 , m_cachedMemoryTempRegister(this, memoryTempRegister)
66 , m_makeJumpPatchable(false)
70 typedef ARM64Assembler::LinkRecord LinkRecord;
71 typedef ARM64Assembler::JumpType JumpType;
72 typedef ARM64Assembler::JumpLinkType JumpLinkType;
73 typedef ARM64Assembler::Condition Condition;
75 static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
76 static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
78 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
79 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
80 static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
81 static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
82 static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
83 static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
84 static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }
86 static const Scale ScalePtr = TimesEight;
// True when `value` can be encoded as a compact load/store offset:
// non-negative, 8-byte aligned, and at most 0x3ff8.
static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
{
    // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
    return !(value & ~0x3ff8);
}
94 enum RelationalCondition {
95 Equal = ARM64Assembler::ConditionEQ,
96 NotEqual = ARM64Assembler::ConditionNE,
97 Above = ARM64Assembler::ConditionHI,
98 AboveOrEqual = ARM64Assembler::ConditionHS,
99 Below = ARM64Assembler::ConditionLO,
100 BelowOrEqual = ARM64Assembler::ConditionLS,
101 GreaterThan = ARM64Assembler::ConditionGT,
102 GreaterThanOrEqual = ARM64Assembler::ConditionGE,
103 LessThan = ARM64Assembler::ConditionLT,
104 LessThanOrEqual = ARM64Assembler::ConditionLE
107 enum ResultCondition {
108 Overflow = ARM64Assembler::ConditionVS,
109 Signed = ARM64Assembler::ConditionMI,
110 PositiveOrZero = ARM64Assembler::ConditionPL,
111 Zero = ARM64Assembler::ConditionEQ,
112 NonZero = ARM64Assembler::ConditionNE
116 IsZero = ARM64Assembler::ConditionEQ,
117 IsNonZero = ARM64Assembler::ConditionNE
120 enum DoubleCondition {
121 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
122 DoubleEqual = ARM64Assembler::ConditionEQ,
123 DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
124 DoubleGreaterThan = ARM64Assembler::ConditionGT,
125 DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
126 DoubleLessThan = ARM64Assembler::ConditionLO,
127 DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
128 // If either operand is NaN, these conditions always evaluate to true.
129 DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
130 DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
131 DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
132 DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
133 DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
134 DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
137 static const RegisterID stackPointerRegister = ARM64Registers::sp;
138 static const RegisterID framePointerRegister = ARM64Registers::fp;
139 static const RegisterID linkRegister = ARM64Registers::lr;
141 // FIXME: Get reasonable implementations for these
// FIXME: Get reasonable implementations for these.
// Only blind 32-bit immediates large enough to embed meaningful data.
static bool shouldBlindForSpecificArch(uint32_t value)
{
    return value >= 0x00ffffff;
}
// FIXME: Get reasonable implementations for these.
// 64-bit variant; same threshold as the 32-bit overload.
static bool shouldBlindForSpecificArch(uint64_t value)
{
    return value >= 0x00ffffff;
}
145 // Integer operations:
147 void add32(RegisterID a, RegisterID b, RegisterID dest)
149 ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
150 m_assembler.add<32>(dest, a, b);
153 void add32(RegisterID src, RegisterID dest)
155 m_assembler.add<32>(dest, dest, src);
158 void add32(TrustedImm32 imm, RegisterID dest)
160 add32(imm, dest, dest);
163 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
165 if (isUInt12(imm.m_value))
166 m_assembler.add<32>(dest, src, UInt12(imm.m_value));
167 else if (isUInt12(-imm.m_value))
168 m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
170 move(imm, getCachedDataTempRegisterIDAndInvalidate());
171 m_assembler.add<32>(dest, src, dataTempRegister);
175 void add32(TrustedImm32 imm, Address address)
177 load32(address, getCachedDataTempRegisterIDAndInvalidate());
179 if (isUInt12(imm.m_value))
180 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
181 else if (isUInt12(-imm.m_value))
182 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
184 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
185 m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
188 store32(dataTempRegister, address);
191 void add32(TrustedImm32 imm, AbsoluteAddress address)
193 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
195 if (isUInt12(imm.m_value)) {
196 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
197 store32(dataTempRegister, address.m_ptr);
201 if (isUInt12(-imm.m_value)) {
202 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
203 store32(dataTempRegister, address.m_ptr);
207 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
208 m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
209 store32(dataTempRegister, address.m_ptr);
212 void add32(Address src, RegisterID dest)
214 load32(src, getCachedDataTempRegisterIDAndInvalidate());
215 add32(dataTempRegister, dest);
218 void add64(RegisterID a, RegisterID b, RegisterID dest)
220 ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
221 if (b == ARM64Registers::sp)
223 m_assembler.add<64>(dest, a, b);
226 void add64(RegisterID src, RegisterID dest)
228 if (src == ARM64Registers::sp)
229 m_assembler.add<64>(dest, src, dest);
231 m_assembler.add<64>(dest, dest, src);
234 void add64(TrustedImm32 imm, RegisterID dest)
236 if (isUInt12(imm.m_value)) {
237 m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
240 if (isUInt12(-imm.m_value)) {
241 m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
245 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
246 m_assembler.add<64>(dest, dest, dataTempRegister);
249 void add64(TrustedImm64 imm, RegisterID dest)
251 intptr_t immediate = imm.m_value;
253 if (isUInt12(immediate)) {
254 m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
257 if (isUInt12(-immediate)) {
258 m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
262 move(imm, getCachedDataTempRegisterIDAndInvalidate());
263 m_assembler.add<64>(dest, dest, dataTempRegister);
266 void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
268 if (isUInt12(imm.m_value)) {
269 m_assembler.add<64>(dest, src, UInt12(imm.m_value));
272 if (isUInt12(-imm.m_value)) {
273 m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
277 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
278 m_assembler.add<64>(dest, src, dataTempRegister);
281 void add64(TrustedImm32 imm, Address address)
283 load64(address, getCachedDataTempRegisterIDAndInvalidate());
285 if (isUInt12(imm.m_value))
286 m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
287 else if (isUInt12(-imm.m_value))
288 m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
290 signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
291 m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
294 store64(dataTempRegister, address);
297 void add64(TrustedImm32 imm, AbsoluteAddress address)
299 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
301 if (isUInt12(imm.m_value)) {
302 m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
303 store64(dataTempRegister, address.m_ptr);
307 if (isUInt12(-imm.m_value)) {
308 m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
309 store64(dataTempRegister, address.m_ptr);
313 signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
314 m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
315 store64(dataTempRegister, address.m_ptr);
318 void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
323 void add64(Address src, RegisterID dest)
325 load64(src, getCachedDataTempRegisterIDAndInvalidate());
326 m_assembler.add<64>(dest, dest, dataTempRegister);
329 void add64(AbsoluteAddress src, RegisterID dest)
331 load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
332 m_assembler.add<64>(dest, dest, dataTempRegister);
335 void and32(RegisterID src, RegisterID dest)
337 and32(dest, src, dest);
340 void and32(RegisterID op1, RegisterID op2, RegisterID dest)
342 m_assembler.and_<32>(dest, op1, op2);
345 void and32(TrustedImm32 imm, RegisterID dest)
347 and32(imm, dest, dest);
350 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
352 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
354 if (logicalImm.isValid()) {
355 m_assembler.and_<32>(dest, src, logicalImm);
359 move(imm, getCachedDataTempRegisterIDAndInvalidate());
360 m_assembler.and_<32>(dest, src, dataTempRegister);
363 void and32(Address src, RegisterID dest)
365 load32(src, dataTempRegister);
366 and32(dataTempRegister, dest);
369 void and64(RegisterID src1, RegisterID src2, RegisterID dest)
371 m_assembler.and_<64>(dest, src1, src2);
374 void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
376 LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
378 if (logicalImm.isValid()) {
379 m_assembler.and_<64>(dest, src, logicalImm);
383 move(imm, getCachedDataTempRegisterIDAndInvalidate());
384 m_assembler.and_<64>(dest, src, dataTempRegister);
387 void and64(RegisterID src, RegisterID dest)
389 m_assembler.and_<64>(dest, dest, src);
392 void and64(TrustedImm32 imm, RegisterID dest)
394 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
396 if (logicalImm.isValid()) {
397 m_assembler.and_<64>(dest, dest, logicalImm);
401 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
402 m_assembler.and_<64>(dest, dest, dataTempRegister);
405 void and64(TrustedImmPtr imm, RegisterID dest)
407 LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));
409 if (logicalImm.isValid()) {
410 m_assembler.and_<64>(dest, dest, logicalImm);
414 move(imm, getCachedDataTempRegisterIDAndInvalidate());
415 m_assembler.and_<64>(dest, dest, dataTempRegister);
418 void countLeadingZeros32(RegisterID src, RegisterID dest)
420 m_assembler.clz<32>(dest, src);
423 void countLeadingZeros64(RegisterID src, RegisterID dest)
425 m_assembler.clz<64>(dest, src);
428 void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
430 m_assembler.lsl<32>(dest, src, shiftAmount);
433 void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
435 m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
438 void lshift32(RegisterID shiftAmount, RegisterID dest)
440 lshift32(dest, shiftAmount, dest);
443 void lshift32(TrustedImm32 imm, RegisterID dest)
445 lshift32(dest, imm, dest);
448 void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
450 m_assembler.lsl<64>(dest, src, shiftAmount);
453 void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
455 m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
458 void lshift64(RegisterID shiftAmount, RegisterID dest)
460 lshift64(dest, shiftAmount, dest);
463 void lshift64(TrustedImm32 imm, RegisterID dest)
465 lshift64(dest, imm, dest);
468 void mul32(RegisterID left, RegisterID right, RegisterID dest)
470 m_assembler.mul<32>(dest, left, right);
473 void mul32(RegisterID src, RegisterID dest)
475 m_assembler.mul<32>(dest, dest, src);
478 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
480 move(imm, getCachedDataTempRegisterIDAndInvalidate());
481 m_assembler.mul<32>(dest, src, dataTempRegister);
484 void mul64(RegisterID src, RegisterID dest)
486 m_assembler.mul<64>(dest, dest, src);
489 void mul64(RegisterID left, RegisterID right, RegisterID dest)
491 m_assembler.mul<64>(dest, left, right);
494 void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
496 m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
499 void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
501 m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
504 void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
506 m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
509 void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
511 m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
514 void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
516 m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
519 void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
521 m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
524 void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
526 m_assembler.sdiv<32>(dest, dividend, divisor);
529 void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
531 m_assembler.sdiv<64>(dest, dividend, divisor);
534 void neg32(RegisterID dest)
536 m_assembler.neg<32>(dest, dest);
539 void neg64(RegisterID dest)
541 m_assembler.neg<64>(dest, dest);
544 void or32(RegisterID src, RegisterID dest)
546 or32(dest, src, dest);
549 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
551 m_assembler.orr<32>(dest, op1, op2);
554 void or32(TrustedImm32 imm, RegisterID dest)
556 or32(imm, dest, dest);
559 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
561 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
563 if (logicalImm.isValid()) {
564 m_assembler.orr<32>(dest, src, logicalImm);
568 ASSERT(src != dataTempRegister);
569 move(imm, getCachedDataTempRegisterIDAndInvalidate());
570 m_assembler.orr<32>(dest, src, dataTempRegister);
573 void or32(RegisterID src, AbsoluteAddress address)
575 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
576 m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
577 store32(dataTempRegister, address.m_ptr);
580 void or32(TrustedImm32 imm, AbsoluteAddress address)
582 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
583 if (logicalImm.isValid()) {
584 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
585 m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
586 store32(dataTempRegister, address.m_ptr);
588 load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
589 or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
590 store32(dataTempRegister, address.m_ptr);
594 void or32(TrustedImm32 imm, Address address)
596 load32(address, getCachedDataTempRegisterIDAndInvalidate());
597 or32(imm, dataTempRegister, dataTempRegister);
598 store32(dataTempRegister, address);
601 void or64(RegisterID src, RegisterID dest)
603 or64(dest, src, dest);
606 void or64(RegisterID op1, RegisterID op2, RegisterID dest)
608 m_assembler.orr<64>(dest, op1, op2);
611 void or64(TrustedImm32 imm, RegisterID dest)
613 or64(imm, dest, dest);
616 void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
618 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
620 if (logicalImm.isValid()) {
621 m_assembler.orr<64>(dest, src, logicalImm);
625 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
626 m_assembler.orr<64>(dest, src, dataTempRegister);
629 void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
631 LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
633 if (logicalImm.isValid()) {
634 m_assembler.orr<64>(dest, src, logicalImm);
638 move(imm, getCachedDataTempRegisterIDAndInvalidate());
639 m_assembler.orr<64>(dest, src, dataTempRegister);
642 void or64(TrustedImm64 imm, RegisterID dest)
644 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
646 if (logicalImm.isValid()) {
647 m_assembler.orr<64>(dest, dest, logicalImm);
651 move(imm, getCachedDataTempRegisterIDAndInvalidate());
652 m_assembler.orr<64>(dest, dest, dataTempRegister);
655 void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
657 m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
660 void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
662 m_assembler.asr<32>(dest, src, shiftAmount);
665 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
667 m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
670 void rshift32(RegisterID shiftAmount, RegisterID dest)
672 rshift32(dest, shiftAmount, dest);
675 void rshift32(TrustedImm32 imm, RegisterID dest)
677 rshift32(dest, imm, dest);
680 void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
682 m_assembler.asr<64>(dest, src, shiftAmount);
685 void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
687 m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
690 void rshift64(RegisterID shiftAmount, RegisterID dest)
692 rshift64(dest, shiftAmount, dest);
695 void rshift64(TrustedImm32 imm, RegisterID dest)
697 rshift64(dest, imm, dest);
700 void sub32(RegisterID src, RegisterID dest)
702 m_assembler.sub<32>(dest, dest, src);
705 void sub32(TrustedImm32 imm, RegisterID dest)
707 if (isUInt12(imm.m_value)) {
708 m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
711 if (isUInt12(-imm.m_value)) {
712 m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
716 move(imm, getCachedDataTempRegisterIDAndInvalidate());
717 m_assembler.sub<32>(dest, dest, dataTempRegister);
720 void sub32(TrustedImm32 imm, Address address)
722 load32(address, getCachedDataTempRegisterIDAndInvalidate());
724 if (isUInt12(imm.m_value))
725 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
726 else if (isUInt12(-imm.m_value))
727 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
729 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
730 m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
733 store32(dataTempRegister, address);
736 void sub32(TrustedImm32 imm, AbsoluteAddress address)
738 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
740 if (isUInt12(imm.m_value)) {
741 m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
742 store32(dataTempRegister, address.m_ptr);
746 if (isUInt12(-imm.m_value)) {
747 m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
748 store32(dataTempRegister, address.m_ptr);
752 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
753 m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
754 store32(dataTempRegister, address.m_ptr);
757 void sub32(Address src, RegisterID dest)
759 load32(src, getCachedDataTempRegisterIDAndInvalidate());
760 sub32(dataTempRegister, dest);
763 void sub64(RegisterID src, RegisterID dest)
765 m_assembler.sub<64>(dest, dest, src);
768 void sub64(RegisterID a, RegisterID b, RegisterID dest)
770 m_assembler.sub<64>(dest, a, b);
773 void sub64(TrustedImm32 imm, RegisterID dest)
775 if (isUInt12(imm.m_value)) {
776 m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
779 if (isUInt12(-imm.m_value)) {
780 m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
784 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
785 m_assembler.sub<64>(dest, dest, dataTempRegister);
788 void sub64(TrustedImm64 imm, RegisterID dest)
790 intptr_t immediate = imm.m_value;
792 if (isUInt12(immediate)) {
793 m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
796 if (isUInt12(-immediate)) {
797 m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
801 move(imm, getCachedDataTempRegisterIDAndInvalidate());
802 m_assembler.sub<64>(dest, dest, dataTempRegister);
805 void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
807 m_assembler.lsr<32>(dest, src, shiftAmount);
810 void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
812 m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
815 void urshift32(RegisterID shiftAmount, RegisterID dest)
817 urshift32(dest, shiftAmount, dest);
820 void urshift32(TrustedImm32 imm, RegisterID dest)
822 urshift32(dest, imm, dest);
825 void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
827 m_assembler.lsr<64>(dest, src, shiftAmount);
830 void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
832 m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
835 void urshift64(RegisterID shiftAmount, RegisterID dest)
837 urshift64(dest, shiftAmount, dest);
840 void urshift64(TrustedImm32 imm, RegisterID dest)
842 urshift64(dest, imm, dest);
845 void xor32(RegisterID src, RegisterID dest)
847 xor32(dest, src, dest);
850 void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
852 m_assembler.eor<32>(dest, op1, op2);
855 void xor32(TrustedImm32 imm, RegisterID dest)
857 xor32(imm, dest, dest);
860 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
862 if (imm.m_value == -1)
863 m_assembler.mvn<32>(dest, src);
865 LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
867 if (logicalImm.isValid()) {
868 m_assembler.eor<32>(dest, src, logicalImm);
872 move(imm, getCachedDataTempRegisterIDAndInvalidate());
873 m_assembler.eor<32>(dest, src, dataTempRegister);
877 void xor64(RegisterID src, Address address)
879 load64(address, getCachedDataTempRegisterIDAndInvalidate());
880 m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
881 store64(dataTempRegister, address);
884 void xor64(RegisterID src, RegisterID dest)
886 xor64(dest, src, dest);
889 void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
891 m_assembler.eor<64>(dest, op1, op2);
894 void xor64(TrustedImm32 imm, RegisterID dest)
896 xor64(imm, dest, dest);
899 void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
901 if (imm.m_value == -1)
902 m_assembler.mvn<64>(dest, src);
904 LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
906 if (logicalImm.isValid()) {
907 m_assembler.eor<64>(dest, src, logicalImm);
911 move(imm, getCachedDataTempRegisterIDAndInvalidate());
912 m_assembler.eor<64>(dest, src, dataTempRegister);
916 void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
918 if (imm.m_value == -1)
919 m_assembler.mvn<64>(dest, src);
921 LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
923 if (logicalImm.isValid()) {
924 m_assembler.eor<64>(dest, src, logicalImm);
928 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
929 m_assembler.eor<64>(dest, src, dataTempRegister);
933 void not32(RegisterID src, RegisterID dest)
935 m_assembler.mvn<32>(dest, src);
938 void not64(RegisterID src, RegisterID dest)
940 m_assembler.mvn<64>(dest, src);
943 // Memory access operations:
945 void load64(ImplicitAddress address, RegisterID dest)
947 if (tryLoadWithOffset<64>(dest, address.base, address.offset))
950 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
951 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
954 void load64(BaseIndex address, RegisterID dest)
956 if (!address.offset && (!address.scale || address.scale == 3)) {
957 m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
961 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
962 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
963 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
966 void load64(const void* address, RegisterID dest)
968 load<64>(address, dest);
971 void load64(RegisterID src, PostIndex simm, RegisterID dest)
973 m_assembler.ldr<64>(dest, src, simm);
976 DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
978 DataLabel32 label(this);
979 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
980 m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
984 DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
986 ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
987 DataLabelCompact label(this);
988 m_assembler.ldr<64>(dest, address.base, address.offset);
992 void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
994 loadPair64(src, TrustedImm32(0), dest1, dest2);
997 void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
999 m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
1002 void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
1004 loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
1007 void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
1009 m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
1012 void abortWithReason(AbortReason reason)
1014 move(TrustedImm32(reason), dataTempRegister);
1018 void abortWithReason(AbortReason reason, intptr_t misc)
1020 move(TrustedImm64(misc), memoryTempRegister);
1021 abortWithReason(reason);
1024 ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
1026 ConvertibleLoadLabel result(this);
1027 ASSERT(!(address.offset & ~0xff8));
1028 m_assembler.ldr<64>(dest, address.base, address.offset);
1032 void load32(ImplicitAddress address, RegisterID dest)
1034 if (tryLoadWithOffset<32>(dest, address.base, address.offset))
1037 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1038 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
1041 void load32(BaseIndex address, RegisterID dest)
1043 if (!address.offset && (!address.scale || address.scale == 2)) {
1044 m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1048 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1049 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1050 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
1053 void load32(const void* address, RegisterID dest)
1055 load<32>(address, dest);
1058 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
1060 DataLabel32 label(this);
1061 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
1062 m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
// NOTE(review): this excerpt is lossy — brace/early-return lines are elided
// (the embedded original line numbers jump). Code below is kept verbatim.
// Emit a 32-bit load whose scaled immediate offset can be patched later;
// the offset must already satisfy isCompactPtrAlignedAddressOffset.
1066 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
1068 ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
1069 DataLabelCompact label(this);
1070 m_assembler.ldr<32>(dest, address.base, address.offset);
// ARM64 handles unaligned 32-bit loads natively, so this is a plain load32.
1074 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
1076 load32(address, dest);
// 16-bit zero-extending load. Fast path: encode the offset directly in the
// ldrh; otherwise materialize the offset in memoryTempRegister and use a
// register-register addressing mode.
1079 void load16(ImplicitAddress address, RegisterID dest)
1081 if (tryLoadWithOffset<16>(dest, address.base, address.offset))
1084 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1085 m_assembler.ldrh(dest, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 1 with no offset maps straight onto the
// extended-register ldrh; otherwise fold offset+scaled index into the temp.
1088 void load16(BaseIndex address, RegisterID dest)
1090 if (!address.offset && (!address.scale || address.scale == 1)) {
1091 m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1095 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1096 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1097 m_assembler.ldrh(dest, address.base, memoryTempRegister);
// Unaligned 16-bit loads are fine on ARM64; delegate to load16.
1100 void load16Unaligned(BaseIndex address, RegisterID dest)
1102 load16(address, dest);
// 16-bit load, sign-extended to 32 bits (ldrsh).
1105 void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
1107 if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
1110 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1111 m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
1114 void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
1116 if (!address.offset && (!address.scale || address.scale == 1)) {
1117 m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1121 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1122 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1123 m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
// Register-to-register 16-bit zero/sign extension.
1126 void zeroExtend16To32(RegisterID src, RegisterID dest)
1128 m_assembler.uxth<32>(dest, src);
1131 void signExtend16To32(RegisterID src, RegisterID dest)
1133 m_assembler.sxth<32>(dest, src);
// 8-bit zero-extending load (ldrb); same fast-path/slow-path shape as load16.
1136 void load8(ImplicitAddress address, RegisterID dest)
1138 if (tryLoadWithOffset<8>(dest, address.base, address.offset))
1141 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1142 m_assembler.ldrb(dest, address.base, memoryTempRegister);
// Byte loads only allow scale 0 in the extended-register form.
1145 void load8(BaseIndex address, RegisterID dest)
1147 if (!address.offset && !address.scale) {
1148 m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1152 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1153 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1154 m_assembler.ldrb(dest, address.base, memoryTempRegister);
// Absolute-address byte load. If dest aliases the cached address register,
// the cache no longer holds the address, so invalidate it.
1157 void load8(const void* address, RegisterID dest)
1159 moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
1160 m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
1161 if (dest == memoryTempRegister)
1162 cachedMemoryTempRegister().invalidate();
// Post-indexed byte load: loads from [src], then bumps src by simm.
1165 void load8(RegisterID src, PostIndex simm, RegisterID dest)
1167 m_assembler.ldrb(dest, src, simm);
// 8-bit load, sign-extended to 32 bits (ldrsb).
1170 void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
1172 if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
1175 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1176 m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
1179 void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
1181 if (!address.offset && !address.scale) {
1182 m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1186 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1187 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1188 m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
// Register-to-register 8-bit zero/sign extension.
1191 void zeroExtend8To32(RegisterID src, RegisterID dest)
1193 m_assembler.uxtb<32>(dest, src);
1196 void signExtend8To32(RegisterID src, RegisterID dest)
1198 m_assembler.sxtb<32>(dest, src);
// 64-bit store; fast path encodes the offset in the str, otherwise the
// offset goes through memoryTempRegister (register-register addressing).
1201 void store64(RegisterID src, ImplicitAddress address)
1203 if (tryStoreWithOffset<64>(src, address.base, address.offset))
1206 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1207 m_assembler.str<64>(src, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 3 (x8) with no offset is directly encodable.
1210 void store64(RegisterID src, BaseIndex address)
1212 if (!address.offset && (!address.scale || address.scale == 3)) {
1213 m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1217 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1218 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1219 m_assembler.str<64>(src, address.base, memoryTempRegister);
1222 void store64(RegisterID src, const void* address)
1224 store<64>(src, address);
// Sign-extends the 32-bit immediate to 64 bits before storing.
1227 void store64(TrustedImm32 imm, ImplicitAddress address)
1229 store64(TrustedImm64(imm.m_value), address);
// Storing zero uses the zero register directly — no immediate materialization.
1232 void store64(TrustedImm64 imm, ImplicitAddress address)
1235 store64(ARM64Registers::zr, address);
1239 moveToCachedReg(imm, dataMemoryTempRegister());
1240 store64(dataTempRegister, address);
1243 void store64(TrustedImm64 imm, BaseIndex address)
1246 store64(ARM64Registers::zr, address);
1250 moveToCachedReg(imm, dataMemoryTempRegister());
1251 store64(dataTempRegister, address);
// Post-indexed 64-bit store: writes to [dest], then bumps dest by simm.
1254 void store64(RegisterID src, RegisterID dest, PostIndex simm)
1256 m_assembler.str<64>(src, dest, simm);
// Fixed-width offset materialization so the offset can be repatched later.
1259 DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
1261 DataLabel32 label(this);
1262 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
1263 m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
// Store-pair: two 64-bit registers in one stp instruction.
1267 void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
1269 storePair64(src1, src2, dest, TrustedImm32(0));
1272 void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
1274 m_assembler.stp<64>(src1, src2, dest, offset.m_value);
// Non-temporal pair store (stnp): hints the data bypasses the caches.
1277 void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
1279 storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
1282 void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
1284 m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
// 32-bit store; mirrors the store64 fast-path/slow-path structure.
1287 void store32(RegisterID src, ImplicitAddress address)
1289 if (tryStoreWithOffset<32>(src, address.base, address.offset))
1292 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1293 m_assembler.str<32>(src, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 2 (x4) with no offset is directly encodable.
1296 void store32(RegisterID src, BaseIndex address)
1298 if (!address.offset && (!address.scale || address.scale == 2)) {
1299 m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1303 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1304 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1305 m_assembler.str<32>(src, address.base, memoryTempRegister);
1308 void store32(RegisterID src, const void* address)
1310 store<32>(src, address);
// Zero immediates store the zero register instead of materializing 0.
1313 void store32(TrustedImm32 imm, ImplicitAddress address)
1316 store32(ARM64Registers::zr, address);
1320 moveToCachedReg(imm, dataMemoryTempRegister());
1321 store32(dataTempRegister, address);
1324 void store32(TrustedImm32 imm, BaseIndex address)
1327 store32(ARM64Registers::zr, address);
1331 moveToCachedReg(imm, dataMemoryTempRegister());
1332 store32(dataTempRegister, address);
1335 void store32(TrustedImm32 imm, const void* address)
1338 store32(ARM64Registers::zr, address);
1342 moveToCachedReg(imm, dataMemoryTempRegister());
1343 store32(dataTempRegister, address);
// Explicit store-zero helpers: always use the zero register.
1346 void storeZero32(ImplicitAddress address)
1348 store32(ARM64Registers::zr, address);
1351 void storeZero32(BaseIndex address)
1353 store32(ARM64Registers::zr, address);
// Fixed-width offset materialization so the offset can be repatched later.
1356 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
1358 DataLabel32 label(this);
1359 signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
1360 m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
// 16-bit store (strh); scale 0 or 1 encodable in BaseIndex fast path.
1364 void store16(RegisterID src, ImplicitAddress address)
1366 if (tryStoreWithOffset<16>(src, address.base, address.offset))
1369 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1370 m_assembler.strh(src, address.base, memoryTempRegister);
1373 void store16(RegisterID src, BaseIndex address)
1375 if (!address.offset && (!address.scale || address.scale == 1)) {
1376 m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1380 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1381 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1382 m_assembler.strh(src, address.base, memoryTempRegister);
// 8-bit store (strb); byte stores only allow scale 0 in the fast path.
1385 void store8(RegisterID src, BaseIndex address)
1387 if (!address.offset && !address.scale) {
1388 m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1392 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1393 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1394 m_assembler.strb(src, address.base, memoryTempRegister);
// Absolute-address byte store via memoryTempRegister.
1397 void store8(RegisterID src, void* address)
1399 move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
1400 m_assembler.strb(src, memoryTempRegister, 0);
1403 void store8(RegisterID src, ImplicitAddress address)
1405 if (tryStoreWithOffset<8>(src, address.base, address.offset))
1408 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1409 m_assembler.strb(src, address.base, memoryTempRegister);
// Immediate is truncated to 8 bits first; zero goes through the zero register.
1412 void store8(TrustedImm32 imm, void* address)
1414 TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
1415 if (!imm8.m_value) {
1416 store8(ARM64Registers::zr, address);
1420 move(imm8, getCachedDataTempRegisterIDAndInvalidate());
1421 store8(dataTempRegister, address);
1424 void store8(TrustedImm32 imm, ImplicitAddress address)
1426 TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
1427 if (!imm8.m_value) {
1428 store8(ARM64Registers::zr, address);
1432 move(imm8, getCachedDataTempRegisterIDAndInvalidate());
1433 store8(dataTempRegister, address);
// Post-indexed byte store: writes to [dest], then bumps dest by simm.
1436 void store8(RegisterID src, RegisterID dest, PostIndex simm)
1438 m_assembler.strb(src, dest, simm);
1441 // Floating-point operations:
// ARM64 always has a full FPU, so every FP capability query returns true.
1443 static bool supportsFloatingPoint() { return true; }
1444 static bool supportsFloatingPointTruncate() { return true; }
1445 static bool supportsFloatingPointSqrt() { return true; }
1446 static bool supportsFloatingPointAbs() { return true; }
1447 static bool supportsFloatingPointRounding() { return true; }
// Selects whether a truncation branch fires on failure or on success.
1449 enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
// Each of these maps one-to-one onto an ARM64 FP instruction; the <64>/<32>
// template argument selects double vs. float precision.
1451 void absDouble(FPRegisterID src, FPRegisterID dest)
1453 m_assembler.fabs<64>(dest, src);
1456 void absFloat(FPRegisterID src, FPRegisterID dest)
1458 m_assembler.fabs<32>(dest, src);
// Two-operand form accumulates into dest (dest += src).
1461 void addDouble(FPRegisterID src, FPRegisterID dest)
1463 addDouble(dest, src, dest);
1466 void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1468 m_assembler.fadd<64>(dest, op1, op2);
// Memory operand is staged through fpTempRegister.
1471 void addDouble(Address src, FPRegisterID dest)
1473 loadDouble(src, fpTempRegister);
1474 addDouble(fpTempRegister, dest);
1477 void addDouble(AbsoluteAddress address, FPRegisterID dest)
1479 loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
1480 addDouble(fpTempRegister, dest);
1483 void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1485 m_assembler.fadd<32>(dest, op1, op2);
// frintp = round toward +inf (ceil); frintm = toward -inf (floor);
// frintz = toward zero (truncate).
1488 void ceilDouble(FPRegisterID src, FPRegisterID dest)
1490 m_assembler.frintp<64>(dest, src);
1493 void ceilFloat(FPRegisterID src, FPRegisterID dest)
1495 m_assembler.frintp<32>(dest, src);
1498 void floorDouble(FPRegisterID src, FPRegisterID dest)
1500 m_assembler.frintm<64>(dest, src);
1503 void floorFloat(FPRegisterID src, FPRegisterID dest)
1505 m_assembler.frintm<32>(dest, src);
1508 void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
1510 m_assembler.frintz<64>(dest, src);
1513 void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
1515 m_assembler.frintz<32>(dest, src);
1518 // Convert 'src' to an integer, and places the resulting 'dest'.
1519 // If the result is not representable as a 32 bit value, branch.
1520 // May also branch for some values that are representable in 32 bits
1521 // (specifically, in this case, 0).
1522 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
// fcvtns: convert double to int32, rounding to nearest.
1524 m_assembler.fcvtns<32, 64>(dest, src);
1526 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1527 m_assembler.scvtf<64, 32>(fpTempRegister, dest);
1528 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1530 // Test for negative zero.
// Only needed when the integer result is 0: -0.0 round-trips to +0.0, so
// inspect the sign bit (bit 63) of the original double's bit pattern.
1532 Jump valueIsNonZero = branchTest32(NonZero, dest);
1533 RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
1534 m_assembler.fmov<64>(scratch, src);
1535 failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
1536 valueIsNonZero.link(this);
// Compare two doubles/floats and branch on the given FP condition.
1540 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1542 m_assembler.fcmp<64>(left, right);
1543 return jumpAfterFloatingPointCompare(cond);
1546 Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1548 m_assembler.fcmp<32>(left, right);
1549 return jumpAfterFloatingPointCompare(cond);
// Branch if reg is a non-zero, non-NaN double. The VS (unordered) branch
// falls through so NaN does NOT take the result branch.
1552 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1554 m_assembler.fcmp_0<64>(reg);
1555 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1556 Jump result = makeBranch(ARM64Assembler::ConditionNE);
1557 unordered.link(this);
// Branch if reg is zero or NaN (the complementary predicate).
1561 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1563 m_assembler.fcmp_0<64>(reg);
1564 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1565 Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
1566 unordered.link(this);
1567 // We get here if either unordered or equal.
1568 Jump result = jump();
1569 notEqual.link(this);
1573 Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1575 // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
1576 m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
1577 zeroExtend32ToPtr(dataTempRegister, dest);
1578 // Check that the low 32-bits sign extend to be equal to the full value.
1579 m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
1580 return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
// fcvt<to, from>: precision conversion between float and double.
1583 void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
1585 m_assembler.fcvt<32, 64>(dest, src);
1588 void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
1590 m_assembler.fcvt<64, 32>(dest, src);
// scvtf<fp-size, int-size>: signed integer to floating point.
// Immediate/memory sources are staged through dataTempRegister.
1593 void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
1595 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1596 convertInt32ToDouble(dataTempRegister, dest);
1599 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
1601 m_assembler.scvtf<64, 32>(dest, src);
1604 void convertInt32ToDouble(Address address, FPRegisterID dest)
1606 load32(address, getCachedDataTempRegisterIDAndInvalidate());
1607 convertInt32ToDouble(dataTempRegister, dest);
1610 void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1612 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1613 convertInt32ToDouble(dataTempRegister, dest);
1616 void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
1618 m_assembler.scvtf<32, 32>(dest, src);
1621 void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
1623 m_assembler.scvtf<64, 64>(dest, src);
1626 void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
1628 m_assembler.scvtf<32, 64>(dest, src);
// Two-operand form divides into dest (dest /= src).
1631 void divDouble(FPRegisterID src, FPRegisterID dest)
1633 divDouble(dest, src, dest);
1636 void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1638 m_assembler.fdiv<64>(dest, op1, op2);
1641 void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1643 m_assembler.fdiv<32>(dest, op1, op2);
// FP loads follow the same fast-path/temp-register scheme as integer loads.
1646 void loadDouble(ImplicitAddress address, FPRegisterID dest)
1648 if (tryLoadWithOffset<64>(dest, address.base, address.offset))
1651 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1652 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 3 (x8) directly encodable for doubles.
1655 void loadDouble(BaseIndex address, FPRegisterID dest)
1657 if (!address.offset && (!address.scale || address.scale == 3)) {
1658 m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1662 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1663 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1664 m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
1667 void loadDouble(TrustedImmPtr address, FPRegisterID dest)
1669 moveToCachedReg(address, cachedMemoryTempRegister());
1670 m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
1673 void loadFloat(ImplicitAddress address, FPRegisterID dest)
1675 if (tryLoadWithOffset<32>(dest, address.base, address.offset))
1678 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1679 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 2 (x4) directly encodable for floats.
1682 void loadFloat(BaseIndex address, FPRegisterID dest)
1684 if (!address.offset && (!address.scale || address.scale == 2)) {
1685 m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1689 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1690 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1691 m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
// fmov moves bits unchanged: FP<->FP, GPR<->FP, and zr->FP for zeroing.
1694 void moveDouble(FPRegisterID src, FPRegisterID dest)
1696 m_assembler.fmov<64>(dest, src);
1699 void moveZeroToDouble(FPRegisterID reg)
1701 m_assembler.fmov<64>(reg, ARM64Registers::zr);
1704 void moveDoubleTo64(FPRegisterID src, RegisterID dest)
1706 m_assembler.fmov<64>(dest, src);
1709 void moveFloatTo32(FPRegisterID src, RegisterID dest)
1711 m_assembler.fmov<32>(dest, src);
1714 void move64ToDouble(RegisterID src, FPRegisterID dest)
1716 m_assembler.fmov<64>(dest, src);
1719 void move32ToFloat(RegisterID src, FPRegisterID dest)
1721 m_assembler.fmov<32>(dest, src);
// Conditional GPR select keyed off an FP compare; the csel width is <64>
// because src/dest are general-purpose registers holding full pointers.
1724 void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
1726 m_assembler.fcmp<64>(left, right);
1727 moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
1730 void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1732 m_assembler.fcmp<64>(left, right);
1733 moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1736 void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
1738 m_assembler.fcmp<32>(left, right);
1739 moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
1742 void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1744 m_assembler.fcmp<32>(left, right);
1745 moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
// Select helpers that run after an fcmp has set the flags. DoubleNotEqual
// and DoubleEqualOrUnordered need special handling because ARM64 condition
// codes cannot express them in a single csel: NE is true for unordered
// (NaN) compares, so DoubleNotEqual must skip the select when VS
// (unordered) is set, and DoubleEqualOrUnordered needs two selects.
// The statement order here is load-bearing — do not reorder.
1748 template<int datasize>
1749 void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
1751 if (cond == DoubleNotEqual) {
1752 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1753 m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
1754 unordered.link(this);
1757 if (cond == DoubleEqualOrUnordered) {
1758 // If the compare is unordered, src is copied to dest and the
1759 // next csel has all arguments equal to src.
1760 // If the compare is ordered, dest is unchanged and EQ decides
1761 // what value to set.
1762 m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
1763 m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
1766 m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
// Then/else variant of the same trick.
1769 template<int datasize>
1770 void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1772 if (cond == DoubleNotEqual) {
1773 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1774 m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
1775 unordered.link(this);
1778 if (cond == DoubleEqualOrUnordered) {
1779 // If the compare is unordered, thenCase is copied to elseCase and the
1780 // next csel has all arguments equal to thenCase.
1781 // If the compare is ordered, dest is unchanged and EQ decides
1782 // what value to set.
1783 m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
1784 m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
1787 m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
// FP-register destination variant, using fcsel instead of csel.
1790 template<int datasize>
1791 void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1793 if (cond == DoubleNotEqual) {
1794 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1795 m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
1796 unordered.link(this);
1799 if (cond == DoubleEqualOrUnordered) {
1800 // If the compare is unordered, thenCase is copied to elseCase and the
1801 // next csel has all arguments equal to thenCase.
1802 // If the compare is ordered, dest is unchanged and EQ decides
1803 // what value to set.
1804 m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
1805 m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
1808 m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
// Select between two doubles based on an FP compare; the select width is
// <64> in both cases because the selected values are doubles.
1811 void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1813 m_assembler.fcmp<64>(left, right);
1814 moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1817 void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1819 m_assembler.fcmp<32>(left, right);
1820 moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
// Two-operand form multiplies into dest (dest *= src).
1823 void mulDouble(FPRegisterID src, FPRegisterID dest)
1825 mulDouble(dest, src, dest);
1828 void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1830 m_assembler.fmul<64>(dest, op1, op2);
1833 void mulDouble(Address src, FPRegisterID dest)
1835 loadDouble(src, fpTempRegister);
1836 mulDouble(fpTempRegister, dest);
1839 void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1841 m_assembler.fmul<32>(dest, op1, op2);
// Bitwise AND of FP registers (vector AND). The float variant reuses the
// 64-bit form: a bitwise op's low 32 bits are the same either way.
1844 void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1846 m_assembler.vand<64>(dest, op1, op2);
1849 void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1851 andDouble(op1, op2, dest);
1854 void negateDouble(FPRegisterID src, FPRegisterID dest)
1856 m_assembler.fneg<64>(dest, src);
1859 void negateFloat(FPRegisterID src, FPRegisterID dest)
1861 m_assembler.fneg<32>(dest, src);
1864 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1866 m_assembler.fsqrt<64>(dest, src);
1869 void sqrtFloat(FPRegisterID src, FPRegisterID dest)
1871 m_assembler.fsqrt<32>(dest, src);
// FP stores follow the same fast-path/temp-register scheme as integer stores.
1874 void storeDouble(FPRegisterID src, ImplicitAddress address)
1876 if (tryStoreWithOffset<64>(src, address.base, address.offset))
1879 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1880 m_assembler.str<64>(src, address.base, memoryTempRegister);
1883 void storeDouble(FPRegisterID src, TrustedImmPtr address)
1885 moveToCachedReg(address, cachedMemoryTempRegister());
1886 m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
// BaseIndex form: scale 0 or 3 (x8) directly encodable for doubles.
1889 void storeDouble(FPRegisterID src, BaseIndex address)
1891 if (!address.offset && (!address.scale || address.scale == 3)) {
1892 m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1896 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1897 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1898 m_assembler.str<64>(src, address.base, memoryTempRegister);
1901 void storeFloat(FPRegisterID src, ImplicitAddress address)
1903 if (tryStoreWithOffset<32>(src, address.base, address.offset))
1906 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1907 m_assembler.str<32>(src, address.base, memoryTempRegister);
// BaseIndex form: scale 0 or 2 (x4) directly encodable for floats.
1910 void storeFloat(FPRegisterID src, BaseIndex address)
1912 if (!address.offset && (!address.scale || address.scale == 2)) {
1913 m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1917 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1918 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1919 m_assembler.str<32>(src, address.base, memoryTempRegister);
// Two-operand form subtracts from dest (dest -= src).
1922 void subDouble(FPRegisterID src, FPRegisterID dest)
1924 subDouble(dest, src, dest);
1927 void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1929 m_assembler.fsub<64>(dest, op1, op2);
1932 void subDouble(Address src, FPRegisterID dest)
1934 loadDouble(src, fpTempRegister);
1935 subDouble(fpTempRegister, dest);
1938 void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1940 m_assembler.fsub<32>(dest, op1, op2);
1943 // Result is undefined if the value is outside of the integer range.
// fcvtzs/fcvtzu: convert double to (un)signed int32, rounding toward zero.
1944 void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1946 m_assembler.fcvtzs<32, 64>(dest, src);
1949 void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1951 m_assembler.fcvtzu<32, 64>(dest, src);
1955 // Stack manipulation operations:
1957 // The ABI is assumed to provide a stack abstraction to memory,
1958 // containing machine word sized units of data. Push and pop
1959 // operations add and remove a single register sized unit of data
1960 // to or from the stack. These operations are not supported on
1961 // ARM64. Peek and poke operations read or write values on the
1962 // stack, without moving the current stack position. Additionally,
1963 // there are popToRestore and pushToSave operations, which are
1964 // designed just for quick-and-dirty saving and restoring of
1965 // temporary values. These operations don't claim to have any
1966 // ABI compatibility.
// Single-register push/pop crash on ARM64 (sp must stay 16-byte aligned);
// callers must use the pair/pushToSave forms instead.
1968 void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
1973 void push(RegisterID) NO_RETURN_DUE_TO_CRASH
1978 void push(Address) NO_RETURN_DUE_TO_CRASH
1983 void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
// Pair push/pop move 16 bytes at a time, preserving sp alignment.
1988 void popPair(RegisterID dest1, RegisterID dest2)
1990 m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16))
1993 void pushPair(RegisterID src1, RegisterID src2)
1995 m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
// pushToSave/popToRestore use a full 16-byte slot per value.
1998 void popToRestore(RegisterID dest)
2000 m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
2003 void pushToSave(RegisterID src)
2005 m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
// Saves an immediate to the stack without clobbering any register state:
// dataTempRegister is spilled, reused, then reloaded from the stack.
// NOTE(review): some statements of this method appear elided in this
// excerpt (original line numbers jump 2010 -> 2013).
2008 void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
2010 RegisterID reg = dataTempRegister;
2013 store64(reg, stackPointerRegister);
2014 load64(Address(stackPointerRegister, 8), reg);
2017 void pushToSave(Address address)
2019 load32(address, getCachedDataTempRegisterIDAndInvalidate());
2020 pushToSave(dataTempRegister);
2023 void pushToSave(TrustedImm32 imm)
2025 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2026 pushToSave(dataTempRegister);
// FP variants adjust sp explicitly around the double load/store.
2029 void popToRestore(FPRegisterID dest)
2031 loadDouble(stackPointerRegister, dest);
2032 add64(TrustedImm32(16), stackPointerRegister);
2035 void pushToSave(FPRegisterID src)
2037 sub64(TrustedImm32(16), stackPointerRegister);
2038 storeDouble(src, stackPointerRegister);
// Every pushToSave slot is 16 bytes, keeping sp 16-byte aligned.
2041 static ptrdiff_t pushToSaveByteOffset() { return 16; }
2043 // Register move operations:
2045 void move(RegisterID src, RegisterID dest)
2048 m_assembler.mov<64>(dest, src);
// Immediate moves share one templated implementation, parameterized on the
// immediate wrapper type and its integer width.
2051 void move(TrustedImm32 imm, RegisterID dest)
2053 moveInternal<TrustedImm32, int32_t>(imm, dest);
2056 void move(TrustedImmPtr imm, RegisterID dest)
2058 moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
2061 void move(TrustedImm64 imm, RegisterID dest)
2063 moveInternal<TrustedImm64, int64_t>(imm, dest);
// Swap via dataTempRegister (three moves).
2066 void swap(RegisterID reg1, RegisterID reg2)
2068 move(reg1, getCachedDataTempRegisterIDAndInvalidate());
2070 move(dataTempRegister, reg2);
// Immediate form sign-extends at compile time via an intptr_t cast.
2073 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2075 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2078 void signExtend32ToPtr(RegisterID src, RegisterID dest)
2080 m_assembler.sxtw(dest, src);
2083 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
2085 m_assembler.uxtw(dest, src);
// 32-bit compare + conditional select. csel<32> writes a 32-bit result
// (upper 32 bits of dest are zeroed).
2088 void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2090 m_assembler.cmp<32>(left, right);
2091 m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
2094 void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2096 m_assembler.cmp<32>(left, right);
2097 m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2100 void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2102 if (!right.m_value) {
2103 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2104 moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
2109 if (isUInt12(right.m_value))
2110 m_assembler.cmp<32>(left, UInt12(right.m_value));
2111 else if (isUInt12(-right.m_value))
2112 m_assembler.cmn<32>(left, UInt12(-right.m_value));
2114 moveToCachedReg(right, dataMemoryTempRegister());
2115 m_assembler.cmp<32>(left, dataTempRegister);
2117 m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
// 64-bit compare + conditional select.
2120 void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2122 m_assembler.cmp<64>(left, right);
2123 m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2126 void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2128 m_assembler.cmp<64>(left, right);
2129 m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
// Immediate form: compare-to-zero becomes a test when possible; the
// immediate is encoded as cmp/cmn (UInt12) or staged in dataTempRegister.
2132 void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2134 if (!right.m_value) {
2135 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2136 moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
2141 if (isUInt12(right.m_value))
2142 m_assembler.cmp<64>(left, UInt12(right.m_value));
2143 else if (isUInt12(-right.m_value))
2144 m_assembler.cmn<64>(left, UInt12(-right.m_value));
2146 moveToCachedReg(right, dataMemoryTempRegister());
2147 m_assembler.cmp<64>(left, dataTempRegister);
2149 m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
// Flag-setting test (tst) + conditional select, 32- and 64-bit widths.
2152 void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2154 m_assembler.tst<32>(testReg, mask);
2155 m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
2158 void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2160 m_assembler.tst<32>(left, right);
2161 m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2164 void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2166 test32(left, right);
2167 m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2170 void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2172 m_assembler.tst<64>(testReg, mask);
2173 m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2176 void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2178 m_assembler.tst<64>(left, right);
2179 m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2182 void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2184 m_assembler.cmp<32>(left, right);
2185 m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
// Double-precision conditional moves. The 32/64 in each name is the width
// of the INTEGER comparison; the FP select is always fcsel<64> because the
// operands are doubles. (NOTE(review): brace/else lines are omitted in this
// dump — inner line numbers are non-contiguous.)
2188 void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
// Zero comparison folds into a self-test when the condition permits.
2190 if (!right.m_value) {
2191 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2192 moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
// Small immediates encode in CMP; negated small immediates in CMN.
2197 if (isUInt12(right.m_value))
2198 m_assembler.cmp<32>(left, UInt12(right.m_value));
2199 else if (isUInt12(-right.m_value))
2200 m_assembler.cmn<32>(left, UInt12(-right.m_value));
2202 moveToCachedReg(right, dataMemoryTempRegister());
2203 m_assembler.cmp<32>(left, dataTempRegister);
2205 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2208 void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2210 m_assembler.cmp<64>(left, right);
2211 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2214 void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2216 if (!right.m_value) {
2217 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2218 moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
2223 if (isUInt12(right.m_value))
2224 m_assembler.cmp<64>(left, UInt12(right.m_value));
2225 else if (isUInt12(-right.m_value))
2226 m_assembler.cmn<64>(left, UInt12(-right.m_value));
2228 moveToCachedReg(right, dataMemoryTempRegister());
2229 m_assembler.cmp<64>(left, dataTempRegister);
2231 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
// Bit-test (TST) gated variants of the above.
2234 void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2236 m_assembler.tst<32>(left, right);
2237 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2240 void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2242 test32(left, right);
2243 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2246 void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2248 m_assembler.tst<64>(left, right);
2249 m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2252 // Forwards / external control flow operations:
2254 // This set of jump and conditional branch operations return a Jump
2255 // object which may linked at a later point, allow forwards jump,
2256 // or jumps that will require external linkage (after the code has been
2259 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
2260 // respectively, for unsigned comparisons the names b, a, be, and ae are
2261 // used (representing the names 'below' and 'above').
2263 // Operands to the comparison are provided in the expected order, e.g.
2264 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
2265 // treated as a signed 32bit value, is less than or equal to 5.
2267 // jz and jnz test whether the first operand is equal to zero, and take
2268 // an optional second operand of a mask under which to perform the test.
// Relational branch operations. Memory operands are loaded into a temp
// register (memoryTempRegister, or dataTempRegister where noted) and the
// call is forwarded to the register/register form. (NOTE(review): this
// dump omits brace/else lines — inner line numbers are non-contiguous.)
2270 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
2272 m_assembler.cmp<32>(left, right);
2273 return Jump(makeBranch(cond));
2276 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
// Comparing with zero: prefer a register self-test when possible.
2278 if (!right.m_value) {
2279 if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2280 return branchTest32(*resultCondition, left, left);
// Small immediates encode in CMP; negated small immediates in CMN;
// otherwise materialize in the data temp register.
2283 if (isUInt12(right.m_value))
2284 m_assembler.cmp<32>(left, UInt12(right.m_value));
2285 else if (isUInt12(-right.m_value))
2286 m_assembler.cmn<32>(left, UInt12(-right.m_value));
2288 moveToCachedReg(right, dataMemoryTempRegister());
2289 m_assembler.cmp<32>(left, dataTempRegister);
2291 return Jump(makeBranch(cond));
2294 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
2296 load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
2297 return branch32(cond, left, memoryTempRegister);
2300 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
2302 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2303 return branch32(cond, memoryTempRegister, right);
2306 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
2308 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2309 return branch32(cond, memoryTempRegister, right);
2312 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2314 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2315 return branch32(cond, memoryTempRegister, right);
// Uses the data temp here so the forwarded imm overload can still use
// the memory temp if it needs one.
2318 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2320 load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2321 return branch32(cond, dataTempRegister, right);
2324 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2326 load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2327 return branch32(cond, memoryTempRegister, right);
2330 Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
// SP is only encodable as the left operand of CMP; swap for equality,
// otherwise copy SP into a temp first.
2332 if (right == ARM64Registers::sp) {
2333 if (cond == Equal && left != ARM64Registers::sp) {
2334 // CMP can only use SP for the left argument, since we are testing for equality, the order
2335 // does not matter here.
2336 std::swap(left, right);
2338 move(right, getCachedDataTempRegisterIDAndInvalidate());
2339 right = dataTempRegister;
2342 m_assembler.cmp<64>(left, right);
2343 return Jump(makeBranch(cond));
2346 Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
2348 if (!right.m_value) {
2349 if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2350 return branchTest64(*resultCondition, left, left);
2353 if (isUInt12(right.m_value))
2354 m_assembler.cmp<64>(left, UInt12(right.m_value));
2355 else if (isUInt12(-right.m_value))
2356 m_assembler.cmn<64>(left, UInt12(-right.m_value));
2358 moveToCachedReg(right, dataMemoryTempRegister());
2359 m_assembler.cmp<64>(left, dataTempRegister);
2361 return Jump(makeBranch(cond));
2364 Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
2366 intptr_t immediate = right.m_value;
2368 if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2369 return branchTest64(*resultCondition, left, left);
2372 if (isUInt12(immediate))
2373 m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
2374 else if (isUInt12(-immediate))
2375 m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
2377 moveToCachedReg(right, dataMemoryTempRegister());
2378 m_assembler.cmp<64>(left, dataTempRegister);
2380 return Jump(makeBranch(cond));
2383 Jump branch64(RelationalCondition cond, RegisterID left, Address right)
2385 load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
2386 return branch64(cond, left, memoryTempRegister);
2389 Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2391 load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2392 return branch64(cond, dataTempRegister, right);
2395 Jump branch64(RelationalCondition cond, Address left, RegisterID right)
2397 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2398 return branch64(cond, memoryTempRegister, right);
2401 Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
2403 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2404 return branch64(cond, memoryTempRegister, right);
// Pointers are 64-bit on this target, so branchPtr forwards to branch64.
2407 Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
2409 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2410 return branch64(cond, memoryTempRegister, right);
// 8-bit branches: the immediate is truncated to int8_t to match what an
// 8-bit load of the operand would produce, then compared as 32-bit.
2413 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
2415 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2416 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2417 return branch32(cond, memoryTempRegister, right8);
2420 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2422 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2423 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2424 return branch32(cond, memoryTempRegister, right8);
2427 Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2429 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
2430 load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2431 return branch32(cond, memoryTempRegister, right8);
// Bit-test branches. Where possible these use the compact ARM64 forms:
// CBZ/CBNZ (makeCompareAndBranch) for whole-register zero tests and
// TBZ/TBNZ (makeTestBitAndBranch) for single-bit masks. (NOTE(review):
// brace/else lines are omitted in this dump.)
2434 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
2436 if (reg == mask && (cond == Zero || cond == NonZero))
2437 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
2438 m_assembler.tst<32>(reg, mask);
2439 return Jump(makeBranch(cond));
// Sets flags from reg & mask without branching; used by the imm-mask
// conditional-move and test helpers above.
2442 void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2444 if (mask.m_value == -1)
2445 m_assembler.tst<32>(reg, reg);
// Masks expressible as an ARM64 logical immediate encode directly in TST.
2447 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2449 if (logicalImm.isValid())
2450 m_assembler.tst<32>(reg, logicalImm);
2452 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2453 m_assembler.tst<32>(reg, dataTempRegister);
// Branch on already-set flags; caller must have established them.
2458 Jump branch(ResultCondition cond)
2460 return Jump(makeBranch(cond));
2463 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2465 if (mask.m_value == -1) {
2466 if ((cond == Zero) || (cond == NonZero))
2467 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
2468 m_assembler.tst<32>(reg, reg);
// Single-bit masks with a zero/nonzero condition become TBZ/TBNZ.
2469 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2470 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2472 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2473 if (logicalImm.isValid()) {
2474 m_assembler.tst<32>(reg, logicalImm);
2475 return Jump(makeBranch(cond));
2478 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2479 m_assembler.tst<32>(reg, dataTempRegister);
2481 return Jump(makeBranch(cond));
2484 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2486 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2487 return branchTest32(cond, memoryTempRegister, mask);
2490 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2492 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2493 return branchTest32(cond, memoryTempRegister, mask);
2496 Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
2498 if (reg == mask && (cond == Zero || cond == NonZero))
2499 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2500 m_assembler.tst<64>(reg, mask);
2501 return Jump(makeBranch(cond));
2504 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2506 if (mask.m_value == -1) {
2507 if ((cond == Zero) || (cond == NonZero))
2508 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2509 m_assembler.tst<64>(reg, reg);
2510 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2511 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2513 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
2515 if (logicalImm.isValid()) {
2516 m_assembler.tst<64>(reg, logicalImm);
2517 return Jump(makeBranch(cond));
// The 32-bit mask is sign-extended to 64 bits before the register test.
2520 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2521 m_assembler.tst<64>(reg, dataTempRegister);
2523 return Jump(makeBranch(cond));
2526 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
2528 if (mask.m_value == -1) {
2529 if ((cond == Zero) || (cond == NonZero))
2530 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2531 m_assembler.tst<64>(reg, reg);
2532 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2533 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2535 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
2537 if (logicalImm.isValid()) {
2538 m_assembler.tst<64>(reg, logicalImm);
2539 return Jump(makeBranch(cond));
2542 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2543 m_assembler.tst<64>(reg, dataTempRegister);
2545 return Jump(makeBranch(cond));
2548 Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
2550 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2551 return branchTest64(cond, dataTempRegister, mask);
2554 Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2556 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2557 return branchTest64(cond, dataTempRegister, mask);
2560 Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2562 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2563 return branchTest64(cond, dataTempRegister, mask);
2566 Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2568 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2569 return branchTest64(cond, dataTempRegister, mask);
// 8-bit tests truncate the mask to int8_t to match the 8-bit load.
2572 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2574 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2575 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2576 return branchTest32(cond, dataTempRegister, mask8);
2579 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2581 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2582 load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2583 return branchTest32(cond, dataTempRegister, mask8);
2586 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
2588 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
// Materialize the large offset, then load byte via base + register offset.
2589 move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate())
2590 m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
2591 return branchTest32(cond, dataTempRegister, mask8);
2594 Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2596 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2597 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2598 return branchTest32(cond, dataTempRegister, mask8);
// ARM64 loads handle unaligned halfwords, so this is a plain branch32.
2601 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2603 return branch32(cond, left, right);
2607 // Arithmetic control flow operations:
2609 // This set of conditional branch operations branch based
2610 // on the result of an arithmetic operation. The operation
2611 // is performed as normal, storing the result.
2613 // * jz operations branch if the result is zero.
2614 // * jo operations branch if the (signed) arithmetic
2615 // operation caused an overflow to occur.
// Arithmetic-and-branch operations: perform the op with the flag-setting
// (S) form, store the result, and branch on the resulting flags.
// (NOTE(review): brace/else lines are omitted in this dump.)
2617 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2619 m_assembler.add<32, S>(dest, op1, op2);
2620 return Jump(makeBranch(cond));
2623 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
// Small immediates encode in ADD; negated ones are done as a
// flag-setting SUB instead.
2625 if (isUInt12(imm.m_value)) {
2626 m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
2627 return Jump(makeBranch(cond));
2629 if (isUInt12(-imm.m_value)) {
2630 m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
2631 return Jump(makeBranch(cond));
2634 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2635 return branchAdd32(cond, op1, dataTempRegister, dest);
2638 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
2640 load32(src, getCachedDataTempRegisterIDAndInvalidate());
2641 return branchAdd32(cond, dest, dataTempRegister, dest);
2644 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
2646 return branchAdd32(cond, dest, src, dest);
2649 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2651 return branchAdd32(cond, dest, imm, dest);
// Read-modify-write of a 32-bit value at an absolute address; the store
// happens before the branch so memory is updated on either outcome.
2654 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
2656 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2658 if (isUInt12(imm.m_value)) {
2659 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
2660 store32(dataTempRegister, address.m_ptr);
2661 } else if (isUInt12(-imm.m_value)) {
2662 m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
2663 store32(dataTempRegister, address.m_ptr);
2665 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
2666 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
2667 store32(dataTempRegister, address.m_ptr);
2670 return Jump(makeBranch(cond));
2673 Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2675 m_assembler.add<64, S>(dest, op1, op2);
2676 return Jump(makeBranch(cond));
2679 Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2681 if (isUInt12(imm.m_value)) {
2682 m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
2683 return Jump(makeBranch(cond));
2685 if (isUInt12(-imm.m_value)) {
2686 m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
2687 return Jump(makeBranch(cond));
2690 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2691 return branchAdd64(cond, op1, dataTempRegister, dest);
2694 Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
2696 return branchAdd64(cond, dest, src, dest);
2699 Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2701 return branchAdd64(cond, dest, imm, dest);
// RelationalCondition variant: caller guarantees the immediate encodes.
2704 Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
2706 ASSERT(isUInt12(imm.m_value));
2707 m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
2708 return Jump(makeBranch(cond));
// MUL does not set flags on ARM64, so the overflow check is done by
// widening to 64 bits and comparing the high half against the sign bits.
2711 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2713 ASSERT(cond != Signed);
2715 if (cond != Overflow) {
2716 m_assembler.mul<32>(dest, src1, src2);
2717 return branchTest32(cond, dest);
2720 // This is a signed multiple of two 32-bit values, producing a 64-bit result.
2721 m_assembler.smull(dest, src1, src2);
2722 // Copy bits 63..32 of the result to bits 31..0 of scratch1.
2723 m_assembler.asr<64>(scratch1, dest, 32);
2724 // Splat bit 31 of the result to bits 31..0 of scratch2.
2725 m_assembler.asr<32>(scratch2, dest, 31);
2726 // After a mul32 the top 32 bits of the register should be clear.
2727 zeroExtend32ToPtr(dest, dest);
2728 // Check that bits 31..63 of the original result were all equal.
2729 return branch32(NotEqual, scratch2, scratch1);
2732 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2734 return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2737 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2739 return branchMul32(cond, dest, src, dest);
2742 Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
2744 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2745 return branchMul32(cond, dataTempRegister, src, dest);
2748 Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2750 ASSERT(cond != Signed);
2752 // This is a signed multiple of two 64-bit values, producing a 64-bit result.
2753 m_assembler.mul<64>(dest, src1, src2);
2755 if (cond != Overflow)
2756 return branchTest64(cond, dest);
2758 // Compute bits 127..64 of the result into scratch1.
2759 m_assembler.smulh(scratch1, src1, src2);
2760 // Splat bit 63 of the result to bits 63..0 of scratch2.
2761 m_assembler.asr<64>(scratch2, dest, 63);
2762 // Check that bits 31..63 of the original result were all equal.
2763 return branch64(NotEqual, scratch2, scratch1);
2766 Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2768 return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2771 Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
2773 return branchMul64(cond, dest, src, dest);
2776 Jump branchNeg32(ResultCondition cond, RegisterID dest)
2778 m_assembler.neg<32, S>(dest, dest);
2779 return Jump(makeBranch(cond));
2782 Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
2784 m_assembler.neg<64, S>(srcDest, srcDest);
2785 return Jump(makeBranch(cond));
// One-operand branchSub32 negates in place (0 - dest).
2788 Jump branchSub32(ResultCondition cond, RegisterID dest)
2790 m_assembler.neg<32, S>(dest, dest);
2791 return Jump(makeBranch(cond));
2794 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2796 m_assembler.sub<32, S>(dest, op1, op2);
2797 return Jump(makeBranch(cond));
2800 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
// Mirror of branchAdd32: subtracting a negated immediate becomes ADD.
2802 if (isUInt12(imm.m_value)) {
2803 m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
2804 return Jump(makeBranch(cond));
2806 if (isUInt12(-imm.m_value)) {
2807 m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
2808 return Jump(makeBranch(cond));
2811 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2812 return branchSub32(cond, op1, dataTempRegister, dest);
2815 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2817 return branchSub32(cond, dest, src, dest);
2820 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2822 return branchSub32(cond, dest, imm, dest);
2825 Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2827 m_assembler.sub<64, S>(dest, op1, op2);
2828 return Jump(makeBranch(cond));
2831 Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2833 if (isUInt12(imm.m_value)) {
2834 m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2835 return Jump(makeBranch(cond));
2837 if (isUInt12(-imm.m_value)) {
2838 m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2839 return Jump(makeBranch(cond));
2842 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2843 return branchSub64(cond, op1, dataTempRegister, dest);
2846 Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
2848 return branchSub64(cond, dest, src, dest);
2851 Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2853 return branchSub64(cond, dest, imm, dest);
2856 Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
2858 ASSERT(isUInt12(imm.m_value));
2859 m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
2860 return Jump(makeBranch(cond));
2864 // Jumps, calls, returns
// Jumps, calls and returns. Patchable calls emit a fixed-width pointer
// move (so the target can be repatched later) followed by an indirect
// branch. (NOTE(review): brace/omitted statement lines are absent from
// this dump — inner line numbers are non-contiguous.)
2866 ALWAYS_INLINE Call call()
2868 AssemblerLabel pointerLabel = m_assembler.label();
// Placeholder target; the linker patches the pointer at pointerLabel.
2869 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2870 invalidateAllTempRegisters();
2871 m_assembler.blr(dataTempRegister);
2872 AssemblerLabel callLabel = m_assembler.label();
// The repatching code relies on a fixed distance between the call site
// and the pointer move.
2873 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2874 return Call(callLabel, Call::Linkable);
2877 ALWAYS_INLINE Call call(RegisterID target)
// Temp register caches are stale after any call.
2879 invalidateAllTempRegisters();
2880 m_assembler.blr(target);
2881 return Call(m_assembler.label(), Call::None);
2884 ALWAYS_INLINE Call call(Address address)
2886 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2887 return call(dataTempRegister);
2890 ALWAYS_INLINE Jump jump()
2892 AssemblerLabel label = m_assembler.label();
// Patchable jumps use a fixed-size encoding so they can be rewritten.
2894 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
2897 void jump(RegisterID target)
2899 m_assembler.br(target);
2902 void jump(Address address)
2904 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2905 m_assembler.br(dataTempRegister);
2908 void jump(BaseIndex address)
2910 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2911 m_assembler.br(dataTempRegister);
2914 void jump(AbsoluteAddress address)
2916 move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
2917 load64(Address(dataTempRegister), dataTempRegister);
2918 m_assembler.br(dataTempRegister);
2921 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2924 return tailRecursiveCall();
2927 ALWAYS_INLINE Call nearCall()
2930 return Call(m_assembler.label(), Call::LinkableNear);
2933 ALWAYS_INLINE Call nearTailCall()
2935 AssemblerLabel label = m_assembler.label();
2937 return Call(label, Call::LinkableNearTail);
2940 ALWAYS_INLINE void ret()
2945 ALWAYS_INLINE Call tailRecursiveCall()
2947 // Like a normal call, but don't link.
2948 AssemblerLabel pointerLabel = m_assembler.label();
2949 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
// BR (not BLR): a tail call does not push a return address.
2950 m_assembler.br(dataTempRegister);
2951 AssemblerLabel callLabel = m_assembler.label();
2952 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2953 return Call(callLabel, Call::Linkable);
2957 // Comparisons operations
// Comparison operations that materialize a boolean 0/1 into dest via
// CSET. The result register is always set with cset<32>. (NOTE(review):
// brace/else lines are omitted in this dump.)
2959 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2961 m_assembler.cmp<32>(left, right);
2962 m_assembler.cset<32>(dest, ARM64Condition(cond));
2965 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
2967 load32(left, getCachedDataTempRegisterIDAndInvalidate());
2968 m_assembler.cmp<32>(dataTempRegister, right);
2969 m_assembler.cset<32>(dest, ARM64Condition(cond));
2972 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
// Zero comparison folds into a self-test where the condition allows.
2974 if (!right.m_value) {
2975 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2976 test32(*resultCondition, left, left, dest);
2981 if (isUInt12(right.m_value))
2982 m_assembler.cmp<32>(left, UInt12(right.m_value));
2983 else if (isUInt12(-right.m_value))
2984 m_assembler.cmn<32>(left, UInt12(-right.m_value));
2986 move(right, getCachedDataTempRegisterIDAndInvalidate());
2987 m_assembler.cmp<32>(left, dataTempRegister);
2989 m_assembler.cset<32>(dest, ARM64Condition(cond));
2992 void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2994 m_assembler.cmp<64>(left, right);
2995 m_assembler.cset<32>(dest, ARM64Condition(cond));
2998 void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
3000 if (!right.m_value) {
3001 if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
3002 test64(*resultCondition, left, left, dest);
// Sign-extend the 32-bit immediate before the 64-bit compare.
3007 signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
3008 m_assembler.cmp<64>(left, dataTempRegister);
3009 m_assembler.cset<32>(dest, ARM64Condition(cond));
// 8-bit compare: truncate the immediate to int8_t to match the 8-bit load.
3012 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
3014 TrustedImm32 right8(static_cast<int8_t>(right.m_value));
3015 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
3016 move(right8, getCachedDataTempRegisterIDAndInvalidate());
3017 compare32(cond, memoryTempRegister, dataTempRegister, dest);
// Bit-test variants producing a boolean result.
3020 void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
3022 m_assembler.tst<32>(src, mask);
3023 m_assembler.cset<32>(dest, ARM64Condition(cond));
3026 void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
3029 m_assembler.cset<32>(dest, ARM64Condition(cond));
3032 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3034 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
3035 test32(cond, memoryTempRegister, mask, dest);
3038 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3040 TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
3041 load8(address, getCachedMemoryTempRegisterIDAndInvalidate());
3042 test32(cond, memoryTempRegister, mask8, dest);
3045 void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
3047 m_assembler.tst<64>(op1, op2);
3048 m_assembler.cset<32>(dest, ARM64Condition(cond));
3051 void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
3053 if (mask.m_value == -1)
3054 m_assembler.tst<64>(src, src);
3056 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
3057 m_assembler.tst<64>(src, dataTempRegister);
3059 m_assembler.cset<32>(dest, ARM64Condition(cond));
// dest = 1 if the carry flag (C) is set, else 0.
3062 void setCarry(RegisterID dest)
3064 m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
3067 // Patchable operations
3069 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
3071 DataLabel32 label(this);
3072 moveWithFixedWidth(imm, dest);
3076 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
3078 DataLabelPtr label(this);
3079 moveWithFixedWidth(imm, dest);
3083 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3085 dataLabel = DataLabelPtr(this);
3086 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3087 return branch64(cond, left, dataTempRegister);
3090 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3092 dataLabel = DataLabelPtr(this);
3093 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3094 return branch64(cond, left, dataTempRegister);
3097 ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
3099 dataLabel = DataLabel32(this);
3100 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3101 return branch32(cond, left, dataTempRegister);
// PatchableJump emitters. Each temporarily sets m_makeJumpPatchable so the
// branch emitted by the underlying branch*() helper is produced in its
// patchable (fixed-width) form, restores the flag, and wraps the resulting
// Jump in a PatchableJump.
// NOTE(review): '{'/'}' body delimiters are elided in this extract; code
// lines are byte-identical to the original. The set/emit/clear pattern is
// not exception-safe, but assembler emission here does not throw — presumably
// intentional; confirm against the other MacroAssembler backends.
3104 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
3106 m_makeJumpPatchable = true;
3107 Jump result = branch64(cond, left, TrustedImm64(right));
3108 m_makeJumpPatchable = false;
3109 return PatchableJump(result);
// Patchable test of reg against a bit mask (default -1 tests all bits).
3112 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
3114 m_makeJumpPatchable = true;
3115 Jump result = branchTest32(cond, reg, mask);
3116 m_makeJumpPatchable = false;
3117 return PatchableJump(result);
// Patchable 32-bit compare: register vs. immediate.
3120 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
3122 m_makeJumpPatchable = true;
3123 Jump result = branch32(cond, reg, imm);
3124 m_makeJumpPatchable = false;
3125 return PatchableJump(result);
// Patchable 32-bit compare: memory operand vs. immediate.
3128 PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
3130 m_makeJumpPatchable = true;
3131 Jump result = branch32(cond, left, imm);
3132 m_makeJumpPatchable = false;
3133 return PatchableJump(result);
// Patchable 64-bit compare: register vs. immediate.
3136 PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
3138 m_makeJumpPatchable = true;
3139 Jump result = branch64(cond, reg, imm);
3140 m_makeJumpPatchable = false;
3141 return PatchableJump(result);
// Patchable 64-bit compare: register vs. register.
3144 PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
3146 m_makeJumpPatchable = true;
3147 Jump result = branch64(cond, left, right);
3148 m_makeJumpPatchable = false;
3149 return PatchableJump(result);
// Patchable branch whose right-hand constant is itself repatchable
// (combines the m_makeJumpPatchable flag with branchPtrWithPatch above).
3152 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3154 m_makeJumpPatchable = true;
3155 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
3156 m_makeJumpPatchable = false;
3157 return PatchableJump(result);
// 32-bit analogue of patchableBranchPtrWithPatch.
3160 PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
3162 m_makeJumpPatchable = true;
3163 Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
3164 m_makeJumpPatchable = false;
3165 return PatchableJump(result);
// Unconditional patchable jump.
3168 PatchableJump patchableJump()
3170 m_makeJumpPatchable = true;
3171 Jump result = jump();
3172 m_makeJumpPatchable = false;
3173 return PatchableJump(result);
// Stores a repatchable pointer constant to `address`: records a DataLabelPtr
// at the current position, materializes initialValue with a fixed-width move
// (moveWithFixedWidth, so the constant can be rewritten in place), then
// stores the 64-bit temp to memory.
// NOTE(review): the `return label;` line and closing brace are elided in this
// extract; code lines below are byte-identical to the original.
3176 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
3178 DataLabelPtr label(this);
3179 moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
3180 store64(dataTempRegister, address);
// Convenience overload: store a repatchable null pointer placeholder.
3184 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
3186 return storePtrWithPatch(TrustedImmPtr(0), address);
// Rewrites the pointer-materialization sequence at `address` so it loads
// `value` into dataTempRegister again (used when reverting a jump
// replacement back to the original move-with-patch sequence).
// NOTE(review): the trailing `true` argument's meaning is not visible from
// this extract — presumably an icache-flush flag; confirm against
// ARM64Assembler::setPointer.
3189 static void reemitInitialMoveWithPatch(void* address, void* value)
3191 ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
3194 // Miscellaneous operations:
// Emits a BRK instruction carrying a 16-bit immediate comment field
// (traps to the debugger/exception handler).
3196 void breakpoint(uint16_t imm = 0)
3198 m_assembler.brk(imm);
// NOTE(review): the line below is a fragment — the signature of its
// enclosing function (presumably a memory-fence helper) is elided from this
// extract. dmbSY emits a full-system data memory barrier.
3208 m_assembler.dmbSY();
3212 // Misc helper functions.
3214 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
// Delegates to the assembler's A64 condition-code inversion.
3215 static RelationalCondition invert(RelationalCondition cond)
3217 return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
// Maps a compare-against-zero relational condition to the equivalent
// test-against-self result condition, when one exists.
// NOTE(review): only part of the switch is visible in this extract; the
// remaining cases (and the no-mapping fallback) are elided.
3220 static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
3229 case GreaterThanOrEqual:
3230 return PositiveOrZero;
// Reads back the destination of a previously-emitted call by decoding the
// pointer-materialization at the call's data location.
3237 static FunctionPtr readCallTarget(CodeLocationCall call)
3239 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
// Overwrites the instruction(s) at instructionStart with a jump to
// destination (delegates to the assembler).
3242 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
3244 ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
// Upper bound, in bytes, on the space replaceWithJump may rewrite.
3247 static ptrdiff_t maxJumpReplacementSize()
3249 return ARM64Assembler::maxJumpReplacementSize();
// This backend reserves no scratch register for value blinding; callers must
// never reach this. The return after RELEASE_ASSERT_NOT_REACHED() only
// satisfies the function's return type.
3252 RegisterID scratchRegisterForBlinding()
3254 // We *do not* have a scratch register for blinding.
3255 RELEASE_ASSERT_NOT_REACHED();
3256 return getCachedDataTempRegisterIDAndInvalidate();
// Jump-replacement capability queries: on ARM64 only the on-register form of
// branch-with-patch supports in-place jump replacement, so the on-address
// variants report false and their companion entry points below are
// unreachable stubs.
3259 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
3260 static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
// The replaceable sequence starts exactly at the data label (offset 0).
3262 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
3264 return label.labelAtOffset(0);
// Unsupported on this platform (see capability queries above).
3267 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
3269 UNREACHABLE_FOR_PLATFORM();
3270 return CodeLocationLabel();
// Unsupported on this platform.
3273 static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
3275 UNREACHABLE_FOR_PLATFORM();
3276 return CodeLocationLabel();
// Undoes a jump replacement on the register form by re-emitting the original
// move-with-patch sequence loading initialValue.
3279 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
3281 reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
// Unsupported on this platform.
3284 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
3286 UNREACHABLE_FOR_PLATFORM();
// Unsupported on this platform.
3289 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
3291 UNREACHABLE_FOR_PLATFORM();
// Redirects an existing call: rewrites the pointer constant located at the
// fixed REPATCH_OFFSET_CALL_TO_POINTER offset before the call site.
3294 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
3296 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
// Same operation, taking the destination as a FunctionPtr.
3299 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
3301 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
3304 #if ENABLE(MASM_PROBE)