/*
 * Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>
#include <wtf/Optional.h>

namespace JSC {

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
public:
    static const unsigned numGPRs = 32;
    static const unsigned numFPRs = 32;

    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;

    RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return getCachedDataTempRegisterIDAndInvalidate();
    }
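    // A note on the temp registers above: throughout this file, ip0
    // (dataTempRegister) and ip1 (memoryTempRegister) are handed out via
    // getCachedDataTempRegisterIDAndInvalidate() /
    // getCachedMemoryTempRegisterIDAndInvalidate(), which also forget any
    // constant cached in the register, since the caller is about to clobber
    // it. The canonical pattern (taken from add32 below) is:
    //
    //     move(imm, getCachedDataTempRegisterIDAndInvalidate());
    //     m_assembler.add<32>(dest, src, dataTempRegister);
    //
    // Skipping the invalidation would let a later moveToCachedReg() assume a
    // stale constant is still live in the register.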

private:
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
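    // For illustration, the patchable call sequence this offset walks back
    // over is four 4-byte instructions (the register choice here is just an
    // example, not fixed by this constant):
    //
    //     movz x16, #ptr[15:0]
    //     movk x16, #ptr[31:16], lsl #16
    //     movk x16, #ptr[47:32], lsl #32
    //     blr  x16
    //
    // hence -16 bytes from the end of the call back to its first instruction.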

public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
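    // Worked example: ~0x3ff8 keeps only the bits outside 3..13, so the test
    // accepts exactly the 8-byte-aligned offsets 0, 8, ..., 0x3ff8 that the
    // compact load/store immediate form can encode directly:
    //
    //     isCompactPtrAlignedAddressOffset(0x3ff8); // true: largest accepted
    //     isCompactPtrAlignedAddressOffset(0x4000); // false: out of range
    //     isCompactPtrAlignedAddressOffset(4);      // false: not 8-byte aligned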

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
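    // Why the two "not the right flag" placeholders above still work: fcmp
    // sets V (unordered) when either operand is NaN, and no single condition
    // code expresses "ordered and not equal" (DoubleNotEqual) or "equal or
    // unordered" (DoubleEqualOrUnordered). The branch helpers detect these
    // two values and emit a two-branch sequence instead, along these lines
    // (the same shape as branchDoubleNonZero below):
    //
    //     // DoubleNotEqual: NaN must not take the branch, so hop over the
    //     // NE test whenever the compare was unordered (VS).
    //     Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
    //     Jump result = makeBranch(ARM64Assembler::ConditionNE);
    //     unordered.link(this);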

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // FIXME: Get reasonable implementations for these
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer operations:

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
        m_assembler.add<32>(dest, a, b);
    }

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else if (src != dest) {
            move(imm, dest);
            add32(src, dest);
        } else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
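    // The immediate-selection ladder above is the template for most of the
    // arithmetic below. Illustrative expansions (pseudo-register names):
    //
    //     add32(TrustedImm32(42), src, dest);      // add wDest, wSrc, #42
    //     add32(TrustedImm32(-42), src, dest);     // sub wDest, wSrc, #42
    //     add32(TrustedImm32(0x123456), src, dest) // doesn't fit in 12 bits:
    //                                              // materialize the constant
    //                                              // (into dest or the data
    //                                              // temp), then add
    //                                              // register-register.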

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
        if (b == ARM64Registers::sp)
            std::swap(a, b);
        m_assembler.add<64>(dest, a, b);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
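    // Context for the LogicalImmediate checks used by the and/or/xor family:
    // AArch64 logical instructions can only encode immediates that form a
    // repeating pattern of a rotated run of ones, so
    // LogicalImmediate::create32/create64 report whether the value is
    // encodable and the helpers fall back to a temp register when it is not.
    // For example:
    //
    //     and32(TrustedImm32(0x00ff00ff), src, dest); // encodable bitmask
    //     and32(TrustedImm32(0x12345678), src, dest); // not encodable; goes
    //                                                 // through dataTempRegister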

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.and_<64>(dest, src1, src2);
    }

    void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, src, dataTempRegister);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void countLeadingZeros64(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<64>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }

    void mul32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<32>(dest, left, right);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul64(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<64>(dest, left, right);
    }

    void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
    }

    void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
    }

    void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
    }

    void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
    }

    void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<32>(dest, dividend, divisor);
    }

    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<64>(dest, dividend, divisor);
    }

    void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.udiv<32>(dest, dividend, divisor);
    }

    void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.udiv<64>(dest, dividend, divisor);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        ASSERT(src != dataTempRegister);
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
        if (logicalImm.isValid()) {
            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
            store32(dataTempRegister, address.m_ptr);
        } else {
            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
            store32(dataTempRegister, address.m_ptr);
        }
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }

    void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.ror<32>(dest, src, imm.m_value & 31);
    }

    void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight32(srcDst, imm, srcDst);
    }

    void rotateRight32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.ror<32>(dest, src, shiftAmount);
    }

    void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.ror<64>(dest, src, imm.m_value & 63);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(srcDst, imm, srcDst);
    }

    void rotateRight64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.ror<64>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.sub<32>(dest, left, right);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(RegisterID a, RegisterID b, RegisterID dest)
    {
        m_assembler.sub<64>(dest, a, b);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }

    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    void urshift64(RegisterID shiftAmount, RegisterID dest)
    {
        urshift64(dest, shiftAmount, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        urshift64(dest, imm, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }

    void not32(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<32>(dest, src);
    }

    void not64(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<64>(dest, src);
    }

    void not64(RegisterID srcDst)
    {
        m_assembler.mvn<64>(srcDst, srcDst);
    }

    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
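    // This two-step shape repeats for every sized load and store below: first
    // try to fold the offset into an immediate addressing form via
    // tryLoadWithOffset/tryStoreWithOffset (defined further down in the full
    // class), and only on failure materialize the offset into
    // memoryTempRegister and fall back to register-register addressing.
    // Roughly:
    //
    //     load64(Address(base, 8), dest);       // ldr dest, [base, #8]
    //     load64(Address(base, 1 << 20), dest); // offset unencodable: moved
    //                                           // into the temp first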

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    void load64(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldr<64>(dest, src, simm);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64(src, TrustedImm32(0), dest1, dest2);
    }

    void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
    }

    void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
    }

    void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxth<32>(dest, src);
    }

    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxth<32>(dest, src);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }
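    // Why the explicit invalidate above: moveToCachedReg records that
    // memoryTempRegister now holds 'address', so a later load from the same
    // address can skip re-materializing it. If the destination of this load
    // is memoryTempRegister itself, the loaded byte overwrites the cached
    // pointer, and the cache entry must be dropped. The same guard appears in
    // load8SignedExtendTo32(const void*, ...) below.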

    void load8(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldrb(dest, src, simm);
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    void load8SignedExtendTo32(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
        m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }

    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtb<32>(dest, src);
    }

    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtb<32>(dest, src);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm.m_value), address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store64(dataTempRegister, address);
    }

    void store64(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.str<64>(src, dest, simm);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64(src1, src2, dest, TrustedImm32(0));
    }

    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
    }

    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
    }

    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }

    void storeZero32(ImplicitAddress address)
    {
        store32(ARM64Registers::zr, address);
    }

    void storeZero32(BaseIndex address)
    {
        store32(ARM64Registers::zr, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<16>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        if (!imm8.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        if (!imm8.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void store8(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.strb(src, dest, simm);
    }

    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
    static bool supportsFloatingPointRounding() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void absFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<32>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<32>(dest, op1, op2);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void ceilFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<32>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    void floorFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<32>(dest, src);
    }

    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<64>(dest, src);
    }

    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<32>(dest, src);
    }

1573     // Convert 'src' to an integer, and place the result in 'dest'.
1574     // If the result is not representable as a 32 bit value, branch.
1575     // May also branch for some values that are representable in 32 bits
1576     // (specifically, in this case, negative zero when negZeroCheck is set).
1577     void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
1578     {
1579         m_assembler.fcvtns<32, 64>(dest, src);
1580
1581         // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
1582         m_assembler.scvtf<64, 32>(fpTempRegister, dest);
1583         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1584
1585         // Test for negative zero.
1586         if (negZeroCheck) {
1587             Jump valueIsNonZero = branchTest32(NonZero, dest);
1588             RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
1589             m_assembler.fmov<64>(scratch, src);
1590             failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
1591             valueIsNonZero.link(this);
1592         }
1593     }
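    // A minimal usage sketch, assuming a hypothetical MacroAssemblerARM64 'jit'
    // and registers 'srcFPR', 'destGPR', and 'scratchFPR' (the FPR scratch
    // argument is unused on ARM64):
    //
    //     JumpList failures;
    //     jit.branchConvertDoubleToInt32(srcFPR, destGPR, failures, scratchFPR);
    //     // ... fast path using the int32 now in destGPR ...
    //     failures.link(&jit); // NaN, out-of-range doubles, and -0.0 land here.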
1594
1595     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1596     {
1597         m_assembler.fcmp<64>(left, right);
1598         return jumpAfterFloatingPointCompare(cond);
1599     }
1600
1601     Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1602     {
1603         m_assembler.fcmp<32>(left, right);
1604         return jumpAfterFloatingPointCompare(cond);
1605     }
1606
1607     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1608     {
1609         m_assembler.fcmp_0<64>(reg);
1610         Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1611         Jump result = makeBranch(ARM64Assembler::ConditionNE);
1612         unordered.link(this);
1613         return result;
1614     }
1615
1616     Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1617     {
1618         m_assembler.fcmp_0<64>(reg);
1619         Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1620         Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
1621         unordered.link(this);
1622         // We get here if either unordered or equal.
1623         Jump result = jump();
1624         notEqual.link(this);
1625         return result;
1626     }
1627
1628     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1629     {
1630         // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest.
1631         m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
1632         zeroExtend32ToPtr(dataTempRegister, dest);
1633         // Check that the low 32 bits sign-extend to the full 64-bit value.
1634         m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
1635         return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
1636     }
1637
1638     void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
1639     {
1640         m_assembler.fcvt<32, 64>(dest, src);
1641     }
1642
1643     void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
1644     {
1645         m_assembler.fcvt<64, 32>(dest, src);
1646     }
1647     
1648     void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
1649     {
1650         move(imm, getCachedDataTempRegisterIDAndInvalidate());
1651         convertInt32ToDouble(dataTempRegister, dest);
1652     }
1653     
1654     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
1655     {
1656         m_assembler.scvtf<64, 32>(dest, src);
1657     }
1658
1659     void convertInt32ToDouble(Address address, FPRegisterID dest)
1660     {
1661         load32(address, getCachedDataTempRegisterIDAndInvalidate());
1662         convertInt32ToDouble(dataTempRegister, dest);
1663     }
1664
1665     void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1666     {
1667         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1668         convertInt32ToDouble(dataTempRegister, dest);
1669     }
1670
1671     void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
1672     {
1673         m_assembler.scvtf<32, 32>(dest, src);
1674     }
1675     
1676     void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
1677     {
1678         m_assembler.scvtf<64, 64>(dest, src);
1679     }
1680
1681     void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
1682     {
1683         m_assembler.scvtf<32, 64>(dest, src);
1684     }
1685     
1686     void divDouble(FPRegisterID src, FPRegisterID dest)
1687     {
1688         divDouble(dest, src, dest);
1689     }
1690
1691     void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1692     {
1693         m_assembler.fdiv<64>(dest, op1, op2);
1694     }
1695
1696     void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1697     {
1698         m_assembler.fdiv<32>(dest, op1, op2);
1699     }
1700
1701     void loadDouble(ImplicitAddress address, FPRegisterID dest)
1702     {
1703         if (tryLoadWithOffset<64>(dest, address.base, address.offset))
1704             return;
1705
1706         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1707         m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
1708     }
1709
1710     void loadDouble(BaseIndex address, FPRegisterID dest)
1711     {
1712         if (!address.offset && (!address.scale || address.scale == 3)) {
1713             m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1714             return;
1715         }
1716
1717         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1718         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1719         m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
1720     }
1721     
1722     void loadDouble(TrustedImmPtr address, FPRegisterID dest)
1723     {
1724         moveToCachedReg(address, cachedMemoryTempRegister());
1725         m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
1726     }
1727
1728     void loadFloat(ImplicitAddress address, FPRegisterID dest)
1729     {
1730         if (tryLoadWithOffset<32>(dest, address.base, address.offset))
1731             return;
1732
1733         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1734         m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
1735     }
1736
1737     void loadFloat(BaseIndex address, FPRegisterID dest)
1738     {
1739         if (!address.offset && (!address.scale || address.scale == 2)) {
1740             m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1741             return;
1742         }
1743
1744         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1745         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1746         m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
1747     }
1748
1749     void moveDouble(FPRegisterID src, FPRegisterID dest)
1750     {
1751         m_assembler.fmov<64>(dest, src);
1752     }
1753
1754     void moveZeroToDouble(FPRegisterID reg)
1755     {
1756         m_assembler.fmov<64>(reg, ARM64Registers::zr);
1757     }
1758
1759     void moveDoubleTo64(FPRegisterID src, RegisterID dest)
1760     {
1761         m_assembler.fmov<64>(dest, src);
1762     }
1763
1764     void moveFloatTo32(FPRegisterID src, RegisterID dest)
1765     {
1766         m_assembler.fmov<32>(dest, src);
1767     }
1768
1769     void move64ToDouble(RegisterID src, FPRegisterID dest)
1770     {
1771         m_assembler.fmov<64>(dest, src);
1772     }
1773
1774     void move32ToFloat(RegisterID src, FPRegisterID dest)
1775     {
1776         m_assembler.fmov<32>(dest, src);
1777     }
1778
1779     void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
1780     {
1781         m_assembler.fcmp<64>(left, right);
1782         moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
1783     }
1784
1785     void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1786     {
1787         m_assembler.fcmp<64>(left, right);
1788         moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1789     }
1790
1791     void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
1792     {
1793         m_assembler.fcmp<32>(left, right);
1794         moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
1795     }
1796
1797     void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1798     {
1799         m_assembler.fcmp<32>(left, right);
1800         moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1801     }
1802
1803     template<int datasize>
1804     void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
1805     {
1806         if (cond == DoubleNotEqual) {
1807             Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1808             m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
1809             unordered.link(this);
1810             return;
1811         }
1812         if (cond == DoubleEqualOrUnordered) {
1813             // If the compare is unordered, src is copied to dest and the
1814             // next csel has all arguments equal to src.
1815             // If the compare is ordered, dest is unchanged and EQ decides
1816             // what value to set.
1817             m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
1818             m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
1819             return;
1820         }
1821         m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
1822     }
1823
1824     template<int datasize>
1825     void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
1826     {
1827         if (cond == DoubleNotEqual) {
1828             Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1829             m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
1830             unordered.link(this);
1831             return;
1832         }
1833         if (cond == DoubleEqualOrUnordered) {
1834             // If the compare is unordered, thenCase is copied to elseCase and the
1835             // next csel has all arguments equal to thenCase.
1836             // If the compare is ordered, elseCase is unchanged and EQ decides
1837             // what value to set.
1838             m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
1839             m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
1840             return;
1841         }
1842         m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
1843     }
1844
1845     template<int datasize>
1846     void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1847     {
1848         if (cond == DoubleNotEqual) {
1849             Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
1850             m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
1851             unordered.link(this);
1852             return;
1853         }
1854         if (cond == DoubleEqualOrUnordered) {
1855             // If the compare is unordered, thenCase is copied to elseCase and the
1856             // next csel has all arguments equal to thenCase.
1857             // If the compare is ordered, elseCase is unchanged and EQ decides
1858             // what value to set.
1859             m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
1860             m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
1861             return;
1862         }
1863         m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
1864     }
1865
1866     void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1867     {
1868         m_assembler.fcmp<64>(left, right);
1869         moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1870     }
1871
1872     void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
1873     {
1874         m_assembler.fcmp<32>(left, right);
1875         moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
1876     }
1877
1878     void mulDouble(FPRegisterID src, FPRegisterID dest)
1879     {
1880         mulDouble(dest, src, dest);
1881     }
1882
1883     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1884     {
1885         m_assembler.fmul<64>(dest, op1, op2);
1886     }
1887
1888     void mulDouble(Address src, FPRegisterID dest)
1889     {
1890         loadDouble(src, fpTempRegister);
1891         mulDouble(fpTempRegister, dest);
1892     }
1893
1894     void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1895     {
1896         m_assembler.fmul<32>(dest, op1, op2);
1897     }
1898
1899     void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1900     {
1901         m_assembler.vand<64>(dest, op1, op2);
1902     }
1903
1904     void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1905     {
1906         andDouble(op1, op2, dest);
1907     }
1908
1909     void negateDouble(FPRegisterID src, FPRegisterID dest)
1910     {
1911         m_assembler.fneg<64>(dest, src);
1912     }
1913
1914     void negateFloat(FPRegisterID src, FPRegisterID dest)
1915     {
1916         m_assembler.fneg<32>(dest, src);
1917     }
1918
1919     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1920     {
1921         m_assembler.fsqrt<64>(dest, src);
1922     }
1923
1924     void sqrtFloat(FPRegisterID src, FPRegisterID dest)
1925     {
1926         m_assembler.fsqrt<32>(dest, src);
1927     }
1928
1929     void storeDouble(FPRegisterID src, ImplicitAddress address)
1930     {
1931         if (tryStoreWithOffset<64>(src, address.base, address.offset))
1932             return;
1933
1934         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1935         m_assembler.str<64>(src, address.base, memoryTempRegister);
1936     }
1937
1938     void storeDouble(FPRegisterID src, TrustedImmPtr address)
1939     {
1940         moveToCachedReg(address, cachedMemoryTempRegister());
1941         m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
1942     }
1943
1944     void storeDouble(FPRegisterID src, BaseIndex address)
1945     {
1946         if (!address.offset && (!address.scale || address.scale == 3)) {
1947             m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1948             return;
1949         }
1950
1951         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1952         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1953         m_assembler.str<64>(src, address.base, memoryTempRegister);
1954     }
1955
1956     void storeFloat(FPRegisterID src, ImplicitAddress address)
1957     {
1958         if (tryStoreWithOffset<32>(src, address.base, address.offset))
1959             return;
1960
1961         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1962         m_assembler.str<32>(src, address.base, memoryTempRegister);
1963     }
1964     
1965     void storeFloat(FPRegisterID src, BaseIndex address)
1966     {
1967         if (!address.offset && (!address.scale || address.scale == 2)) {
1968             m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1969             return;
1970         }
1971
1972         signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1973         m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1974         m_assembler.str<32>(src, address.base, memoryTempRegister);
1975     }
1976
1977     void subDouble(FPRegisterID src, FPRegisterID dest)
1978     {
1979         subDouble(dest, src, dest);
1980     }
1981
1982     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1983     {
1984         m_assembler.fsub<64>(dest, op1, op2);
1985     }
1986
1987     void subDouble(Address src, FPRegisterID dest)
1988     {
1989         loadDouble(src, fpTempRegister);
1990         subDouble(fpTempRegister, dest);
1991     }
1992
1993     void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1994     {
1995         m_assembler.fsub<32>(dest, op1, op2);
1996     }
1997
1998     // Result is undefined if the value is outside of the integer range.
1999     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
2000     {
2001         m_assembler.fcvtzs<32, 64>(dest, src);
2002     }
2003
2004     void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
2005     {
2006         m_assembler.fcvtzu<32, 64>(dest, src);
2007     }
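    // Note: the "undefined" result above is the cross-platform contract; the
    // ARM64 FCVTZS/FCVTZU instructions themselves saturate out-of-range inputs
    // to the destination's limits and convert NaN to zero, but callers should
    // not rely on that behaviour here.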
2008
2009
2010     // Stack manipulation operations:
2011     //
2012     // The ABI is assumed to provide a stack abstraction to memory,
2013     // containing machine-word-sized units of data. Push and pop
2014     // operations add and remove a single register-sized unit of data
2015     // to or from the stack. These operations are not supported on
2016     // ARM64. Peek and poke operations read or write values on the
2017     // stack, without moving the current stack position. Additionally,
2018     // there are popToRestore and pushToSave operations, which are
2019     // designed just for quick-and-dirty saving and restoring of
2020     // temporary values. These operations don't claim to have any
2021     // ABI compatibility.
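    //
    // A minimal sketch of the supported idioms, assuming a hypothetical
    // MacroAssemblerARM64 'jit' and GPRs 'reg1'/'reg2' (emitted ARM64
    // instructions shown alongside):
    //
    //     jit.pushPair(reg1, reg2);  // stp reg1, reg2, [sp, #-16]!
    //     jit.popPair(reg1, reg2);   // ldp reg1, reg2, [sp], #16
    //     jit.pushToSave(reg1);      // str reg1, [sp, #-16]! (16 bytes keeps sp aligned)
    //     jit.popToRestore(reg1);    // ldr reg1, [sp], #16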
2022     
2023     void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
2024     {
2025         CRASH();
2026     }
2027
2028     void push(RegisterID) NO_RETURN_DUE_TO_CRASH
2029     {
2030         CRASH();
2031     }
2032
2033     void push(Address) NO_RETURN_DUE_TO_CRASH
2034     {
2035         CRASH();
2036     }
2037
2038     void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
2039     {
2040         CRASH();
2041     }
2042
2043     void popPair(RegisterID dest1, RegisterID dest2)
2044     {
2045         m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
2046     }
2047
2048     void pushPair(RegisterID src1, RegisterID src2)
2049     {
2050         m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
2051     }
2052
2053     void popToRestore(RegisterID dest)
2054     {
2055         m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
2056     }
2057
2058     void pushToSave(RegisterID src)
2059     {
2060         m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
2061     }
2062     
2063     void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
2064     {
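        // Pushes 'imm' without clobbering any register: push two copies of
        // dataTempRegister to carve out a 16-byte slot (preserving its value),
        // overwrite the low 8 bytes of the slot with the immediate, then reload
        // the register's original value from the copy in the high 8 bytes.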
2065         RegisterID reg = dataTempRegister;
2066         pushPair(reg, reg);
2067         move(imm, reg);
2068         store64(reg, stackPointerRegister);
2069         load64(Address(stackPointerRegister, 8), reg);
2070     }
2071
2072     void pushToSave(Address address)
2073     {
2074         load32(address, getCachedDataTempRegisterIDAndInvalidate());
2075         pushToSave(dataTempRegister);
2076     }
2077
2078     void pushToSave(TrustedImm32 imm)
2079     {
2080         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2081         pushToSave(dataTempRegister);
2082     }
2083     
2084     void popToRestore(FPRegisterID dest)
2085     {
2086         loadDouble(stackPointerRegister, dest);
2087         add64(TrustedImm32(16), stackPointerRegister);
2088     }
2089     
2090     void pushToSave(FPRegisterID src)
2091     {
2092         sub64(TrustedImm32(16), stackPointerRegister);
2093         storeDouble(src, stackPointerRegister);
2094     }
2095
2096     static ptrdiff_t pushToSaveByteOffset() { return 16; }
2097
2098     // Register move operations:
2099
2100     void move(RegisterID src, RegisterID dest)
2101     {
2102         if (src != dest)
2103             m_assembler.mov<64>(dest, src);
2104     }
2105
2106     void move(TrustedImm32 imm, RegisterID dest)
2107     {
2108         moveInternal<TrustedImm32, int32_t>(imm, dest);
2109     }
2110
2111     void move(TrustedImmPtr imm, RegisterID dest)
2112     {
2113         moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
2114     }
2115
2116     void move(TrustedImm64 imm, RegisterID dest)
2117     {
2118         moveInternal<TrustedImm64, int64_t>(imm, dest);
2119     }
2120
2121     void swap(RegisterID reg1, RegisterID reg2)
2122     {
2123         move(reg1, getCachedDataTempRegisterIDAndInvalidate());
2124         move(reg2, reg1);
2125         move(dataTempRegister, reg2);
2126     }
2127
2128     void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2129     {
2130         move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2131     }
2132     
2133     void signExtend32ToPtr(RegisterID src, RegisterID dest)
2134     {
2135         m_assembler.sxtw(dest, src);
2136     }
2137
2138     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
2139     {
2140         m_assembler.uxtw(dest, src);
2141     }
2142
2143     void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2144     {
2145         m_assembler.cmp<32>(left, right);
2146         m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
2147     }
2148
2149     void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2150     {
2151         m_assembler.cmp<32>(left, right);
2152         m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2153     }
2154
2155     void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2156     {
2157         if (!right.m_value) {
2158             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2159                 moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
2160                 return;
2161             }
2162         }
2163
2164         if (isUInt12(right.m_value))
2165             m_assembler.cmp<32>(left, UInt12(right.m_value));
2166         else if (isUInt12(-right.m_value))
2167             m_assembler.cmn<32>(left, UInt12(-right.m_value));
2168         else {
2169             moveToCachedReg(right, dataMemoryTempRegister());
2170             m_assembler.cmp<32>(left, dataTempRegister);
2171         }
2172         m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2173     }
2174
2175     void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2176     {
2177         m_assembler.cmp<64>(left, right);
2178         m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2179     }
2180
2181     void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2182     {
2183         m_assembler.cmp<64>(left, right);
2184         m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2185     }
2186
2187     void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2188     {
2189         if (!right.m_value) {
2190             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2191                 moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
2192                 return;
2193             }
2194         }
2195
2196         if (isUInt12(right.m_value))
2197             m_assembler.cmp<64>(left, UInt12(right.m_value));
2198         else if (isUInt12(-right.m_value))
2199             m_assembler.cmn<64>(left, UInt12(-right.m_value));
2200         else {
2201             moveToCachedReg(right, dataMemoryTempRegister());
2202             m_assembler.cmp<64>(left, dataTempRegister);
2203         }
2204         m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2205     }
2206
2207     void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2208     {
2209         m_assembler.tst<32>(testReg, mask);
2210         m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
2211     }
2212
2213     void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2214     {
2215         m_assembler.tst<32>(left, right);
2216         m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2217     }
2218
2219     void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2220     {
2221         test32(left, right);
2222         m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2223     }
2224
2225     void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2226     {
2227         m_assembler.tst<64>(testReg, mask);
2228         m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2229     }
2230
2231     void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2232     {
2233         m_assembler.tst<64>(left, right);
2234         m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2235     }
2236
2237     void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2238     {
2239         m_assembler.cmp<32>(left, right);
2240         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2241     }
2242
2243     void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2244     {
2245         if (!right.m_value) {
2246             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2247                 moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
2248                 return;
2249             }
2250         }
2251
2252         if (isUInt12(right.m_value))
2253             m_assembler.cmp<32>(left, UInt12(right.m_value));
2254         else if (isUInt12(-right.m_value))
2255             m_assembler.cmn<32>(left, UInt12(-right.m_value));
2256         else {
2257             moveToCachedReg(right, dataMemoryTempRegister());
2258             m_assembler.cmp<32>(left, dataTempRegister);
2259         }
2260         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2261     }
2262
2263     void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2264     {
2265         m_assembler.cmp<64>(left, right);
2266         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2267     }
2268
2269     void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2270     {
2271         if (!right.m_value) {
2272             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2273                 moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
2274                 return;
2275             }
2276         }
2277
2278         if (isUInt12(right.m_value))
2279             m_assembler.cmp<64>(left, UInt12(right.m_value));
2280         else if (isUInt12(-right.m_value))
2281             m_assembler.cmn<64>(left, UInt12(-right.m_value));
2282         else {
2283             moveToCachedReg(right, dataMemoryTempRegister());
2284             m_assembler.cmp<64>(left, dataTempRegister);
2285         }
2286         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2287     }
2288
2289     void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2290     {
2291         m_assembler.tst<32>(left, right);
2292         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2293     }
2294
2295     void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2296     {
2297         test32(left, right);
2298         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2299     }
2300
2301     void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2302     {
2303         m_assembler.tst<64>(left, right);
2304         m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2305     }
2306
2307     // Forwards / external control flow operations:
2308     //
2309     // This set of jump and conditional branch operations returns a Jump
2310     // object which may be linked at a later point, allowing forward jumps,
2311     // or jumps that will require external linkage (after the code has been
2312     // relocated).
2313     //
2314     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
2315     // respectively; for unsigned comparisons the names b, a, be, and ae are
2316     // used (representing the names 'below' and 'above').
2317     //
2318     // Operands to the comparison are provided in the expected order, e.g.
2319     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
2320     // treated as a signed 32-bit value, is less than or equal to 5.
2321     //
2322     // jz and jnz test whether the first operand is equal to zero, and take
2323     // an optional second operand of a mask under which to perform the test.
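    //
    // A minimal usage sketch, assuming a hypothetical MacroAssemblerARM64 'jit'
    // and a GPR 'reg1':
    //
    //     Jump isSmall = jit.branch32(LessThanOrEqual, reg1, TrustedImm32(5));
    //     // ... code for the reg1 > 5 case ...
    //     isSmall.link(&jit); // branch target for reg1 <= 5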
2324
2325     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
2326     {
2327         m_assembler.cmp<32>(left, right);
2328         return Jump(makeBranch(cond));
2329     }
2330
2331     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
2332     {
2333         if (!right.m_value) {
2334             if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2335                 return branchTest32(*resultCondition, left, left);
2336         }
2337
2338         if (isUInt12(right.m_value))
2339             m_assembler.cmp<32>(left, UInt12(right.m_value));
2340         else if (isUInt12(-right.m_value))
2341             m_assembler.cmn<32>(left, UInt12(-right.m_value));
2342         else {
2343             moveToCachedReg(right, dataMemoryTempRegister());
2344             m_assembler.cmp<32>(left, dataTempRegister);
2345         }
2346         return Jump(makeBranch(cond));
2347     }
2348
2349     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
2350     {
2351         load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
2352         return branch32(cond, left, memoryTempRegister);
2353     }
2354
2355     Jump branch32(RelationalCondition cond, Address left, RegisterID right)
2356     {
2357         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2358         return branch32(cond, memoryTempRegister, right);
2359     }
2360
2361     Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
2362     {
2363         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2364         return branch32(cond, memoryTempRegister, right);
2365     }
2366
2367     Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2368     {
2369         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2370         return branch32(cond, memoryTempRegister, right);
2371     }
2372
2373     Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2374     {
2375         load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2376         return branch32(cond, dataTempRegister, right);
2377     }
2378
2379     Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2380     {
2381         load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2382         return branch32(cond, memoryTempRegister, right);
2383     }
2384
2385     Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
2386     {
2387         if (right == ARM64Registers::sp) {
2388             if (cond == Equal && left != ARM64Registers::sp) {
2389                 // CMP can only use SP for the left argument; since we are testing for equality, the order
2390                 // of the operands does not matter here.
2391                 std::swap(left, right);
2392             } else {
2393                 move(right, getCachedDataTempRegisterIDAndInvalidate());
2394                 right = dataTempRegister;
2395             }
2396         }
2397         m_assembler.cmp<64>(left, right);
2398         return Jump(makeBranch(cond));
2399     }
2400
2401     Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
2402     {
2403         if (!right.m_value) {
2404             if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2405                 return branchTest64(*resultCondition, left, left);
2406         }
2407
2408         if (isUInt12(right.m_value))
2409             m_assembler.cmp<64>(left, UInt12(right.m_value));
2410         else if (isUInt12(-right.m_value))
2411             m_assembler.cmn<64>(left, UInt12(-right.m_value));
2412         else {
2413             moveToCachedReg(right, dataMemoryTempRegister());
2414             m_assembler.cmp<64>(left, dataTempRegister);
2415         }
2416         return Jump(makeBranch(cond));
2417     }
2418
2419     Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
2420     {
2421         intptr_t immediate = right.m_value;
2422         if (!immediate) {
2423             if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
2424                 return branchTest64(*resultCondition, left, left);
2425         }
2426
2427         if (isUInt12(immediate))
2428             m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
2429         else if (isUInt12(-immediate))
2430             m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
2431         else {
2432             moveToCachedReg(right, dataMemoryTempRegister());
2433             m_assembler.cmp<64>(left, dataTempRegister);
2434         }
2435         return Jump(makeBranch(cond));
2436     }
2437
2438     Jump branch64(RelationalCondition cond, RegisterID left, Address right)
2439     {
2440         load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
2441         return branch64(cond, left, memoryTempRegister);
2442     }
2443
2444     Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2445     {
2446         load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2447         return branch64(cond, dataTempRegister, right);
2448     }
2449
2450     Jump branch64(RelationalCondition cond, Address left, RegisterID right)
2451     {
2452         load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2453         return branch64(cond, memoryTempRegister, right);
2454     }
2455
2456     Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
2457     {
2458         load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2459         return branch64(cond, memoryTempRegister, right);
2460     }
2461
2462     Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
2463     {
2464         load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2465         return branch64(cond, memoryTempRegister, right);
2466     }
2467
2468     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
2469     {
2470         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
2471         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
2472         return branch32(cond, memoryTempRegister, right8);
2473     }
2474
2475     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2476     {
2477         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
2478         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
2479         return branch32(cond, memoryTempRegister, right8);
2480     }
2481     
2482     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2483     {
2484         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
2485         MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2486         return branch32(cond, memoryTempRegister, right8);
2487     }
2488     
2489     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
2490     {
2491         if (reg == mask && (cond == Zero || cond == NonZero))
2492             return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
2493         m_assembler.tst<32>(reg, mask);
2494         return Jump(makeBranch(cond));
2495     }
2496
2497     void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2498     {
2499         if (mask.m_value == -1)
2500             m_assembler.tst<32>(reg, reg);
2501         else {
2502             LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2503
2504             if (logicalImm.isValid())
2505                 m_assembler.tst<32>(reg, logicalImm);
2506             else {
2507                 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2508                 m_assembler.tst<32>(reg, dataTempRegister);
2509             }
2510         }
2511     }
2512
2513     Jump branch(ResultCondition cond)
2514     {
2515         return Jump(makeBranch(cond));
2516     }
2517
2518     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2519     {
2520         if (mask.m_value == -1) {
2521             if ((cond == Zero) || (cond == NonZero))
2522                 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
2523             m_assembler.tst<32>(reg, reg);
2524         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2525             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2526         else {
2527             LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2528             if (logicalImm.isValid()) {
2529                 m_assembler.tst<32>(reg, logicalImm);
2530                 return Jump(makeBranch(cond));
2531             }
2532
2533             move(mask, getCachedDataTempRegisterIDAndInvalidate());
2534             m_assembler.tst<32>(reg, dataTempRegister);
2535         }
2536         return Jump(makeBranch(cond));
2537     }
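    // For Zero/NonZero conditions the cases above pick the cheapest ARM64 branch
    // encoding: an all-ones mask becomes a compare-and-branch (CBZ/CBNZ) and a
    // single-bit mask becomes a test-bit-and-branch (TBZ/TBNZ); other masks fall
    // back to TST followed by a conditional branch.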
2538
2539     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2540     {
2541         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2542         return branchTest32(cond, memoryTempRegister, mask);
2543     }
2544
2545     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2546     {
2547         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2548         return branchTest32(cond, memoryTempRegister, mask);
2549     }
2550
2551     Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
2552     {
2553         if (reg == mask && (cond == Zero || cond == NonZero))
2554             return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2555         m_assembler.tst<64>(reg, mask);
2556         return Jump(makeBranch(cond));
2557     }
2558
2559     Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2560     {
2561         if (mask.m_value == -1) {
2562             if ((cond == Zero) || (cond == NonZero))
2563                 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2564             m_assembler.tst<64>(reg, reg);
2565         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2566             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2567         else {
2568             LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
2569
2570             if (logicalImm.isValid()) {
2571                 m_assembler.tst<64>(reg, logicalImm);
2572                 return Jump(makeBranch(cond));
2573             }
2574
2575             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2576             m_assembler.tst<64>(reg, dataTempRegister);
2577         }
2578         return Jump(makeBranch(cond));
2579     }
2580
2581     Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
2582     {
2583         if (mask.m_value == -1) {
2584             if ((cond == Zero) || (cond == NonZero))
2585                 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2586             m_assembler.tst<64>(reg, reg);
2587         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2588             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2589         else {
2590             LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
2591
2592             if (logicalImm.isValid()) {
2593                 m_assembler.tst<64>(reg, logicalImm);
2594                 return Jump(makeBranch(cond));
2595             }
2596
2597             move(mask, getCachedDataTempRegisterIDAndInvalidate());
2598             m_assembler.tst<64>(reg, dataTempRegister);
2599         }
2600         return Jump(makeBranch(cond));
2601     }
2602
2603     Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
2604     {
2605         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2606         return branchTest64(cond, dataTempRegister, mask);
2607     }
2608
2609     Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2610     {
2611         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2612         return branchTest64(cond, dataTempRegister, mask);
2613     }
2614
2615     Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2616     {
2617         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2618         return branchTest64(cond, dataTempRegister, mask);
2619     }
2620
2621     Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2622     {
2623         load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2624         return branchTest64(cond, dataTempRegister, mask);
2625     }
2626
2627     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2628     {
2629         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
2630         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
2631         return branchTest32(cond, dataTempRegister, mask8);
2632     }
2633
2634     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2635     {
2636         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
2637         MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2638         return branchTest32(cond, dataTempRegister, mask8);
2639     }
2640
2641     Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
2642     {
2643         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
2644         move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
2645
2646         if (MacroAssemblerHelpers::isUnsigned<MacroAssemblerARM64>(cond))
2647             m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
2648         else
2649             m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister);
2650
2651         return branchTest32(cond, dataTempRegister, mask8);
2652     }
2653
2654     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2655     {
2656         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
2657         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
2658         return branchTest32(cond, dataTempRegister, mask8);
2659     }
2660
2661     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2662     {
2663         return branch32(cond, left, right);
2664     }
2665
2666
2667     // Arithmetic control flow operations:
2668     //
2669     // This set of conditional branch operations branch based
2670     // on the result of an arithmetic operation. The operation
2671     // is performed as normal, storing the result.
2672     //
2673     // * jz operations branch if the result is zero.
2674     // * jo operations branch if the (signed) arithmetic
2675     //   operation caused an overflow to occur.
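    //
    // A minimal usage sketch, assuming a hypothetical MacroAssemblerARM64 'jit'
    // and GPRs 'src'/'dest':
    //
    //     Jump overflowed = jit.branchAdd32(Overflow, src, dest); // dest += src; branch on signed overflow
    //     Jump wasZero = jit.branchSub32(Zero, src, dest);        // dest -= src; branch if the result is zero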
2676     
2677     Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2678     {
2679         m_assembler.add<32, S>(dest, op1, op2);
2680         return Jump(makeBranch(cond));
2681     }
2682
2683     Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2684     {
2685         if (isUInt12(imm.m_value)) {
2686             m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
2687             return Jump(makeBranch(cond));
2688         }
2689         if (isUInt12(-imm.m_value)) {
2690             m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
2691             return Jump(makeBranch(cond));
2692         }
2693
2694         signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2695         return branchAdd32(cond, op1, dataTempRegister, dest);
2696     }
2697
2698     Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
2699     {
2700         load32(src, getCachedDataTempRegisterIDAndInvalidate());
2701         return branchAdd32(cond, dest, dataTempRegister, dest);
2702     }
2703
2704     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
2705     {
2706         return branchAdd32(cond, dest, src, dest);
2707     }
2708
2709     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2710     {
2711         return branchAdd32(cond, dest, imm, dest);
2712     }
2713
2714     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
2715     {
2716         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2717
2718         if (isUInt12(imm.m_value)) {
2719             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
2720             store32(dataTempRegister, address.m_ptr);
2721         } else if (isUInt12(-imm.m_value)) {
2722             m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
2723             store32(dataTempRegister, address.m_ptr);
2724         } else {
2725             move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
2726             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
2727             store32(dataTempRegister, address.m_ptr);
2728         }
2729
2730         return Jump(makeBranch(cond));
2731     }
2732
2733     Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2734     {
2735         m_assembler.add<64, S>(dest, op1, op2);
2736         return Jump(makeBranch(cond));
2737     }
2738
2739     Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2740     {
2741         if (isUInt12(imm.m_value)) {
2742             m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
2743             return Jump(makeBranch(cond));
2744         }
2745         if (isUInt12(-imm.m_value)) {
2746             m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
2747             return Jump(makeBranch(cond));
2748         }
2749
2750         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2751         return branchAdd64(cond, op1, dataTempRegister, dest);
2752     }
2753
2754     Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
2755     {
2756         return branchAdd64(cond, dest, src, dest);
2757     }
2758
2759     Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2760     {
2761         return branchAdd64(cond, dest, imm, dest);
2762     }
2763
2764     Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
2765     {
2766         ASSERT(isUInt12(imm.m_value));
2767         m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
2768         return Jump(makeBranch(cond));
2769     }
2770
2771     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2772     {
2773         ASSERT(cond != Signed);
2774
2775         if (cond != Overflow) {
2776             m_assembler.mul<32>(dest, src1, src2);
2777             return branchTest32(cond, dest);
2778         }
2779
2780         // This is a signed multiply of two 32-bit values, producing a 64-bit result.
2781         m_assembler.smull(dest, src1, src2);
2782         // Copy bits 63..32 of the result to bits 31..0 of scratch1.
2783         m_assembler.asr<64>(scratch1, dest, 32);
2784         // Splat bit 31 of the result to bits 31..0 of scratch2.
2785         m_assembler.asr<32>(scratch2, dest, 31);
2786         // After a mul32 the top 32 bits of the register should be clear.
2787         zeroExtend32ToPtr(dest, dest);
2788         // Check that bits 31..63 of the original result were all equal.
2789         return branch32(NotEqual, scratch2, scratch1);
2790     }
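    // Worked example of the overflow check above: for src1 = src2 = 0x10000 the
    // 64-bit product is 0x100000000, so scratch1 (bits 63..32) holds 1 while
    // scratch2 (bit 31 splatted) holds 0; they differ and the branch is taken.
    // For any product that fits in a signed 32-bit value, bits 63..31 all equal
    // the sign bit, the two scratch values match, and the branch falls through.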
2791
2792     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2793     {
2794         return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2795     }
2796
2797     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2798     {
2799         return branchMul32(cond, dest, src, dest);
2800     }
2801
2802     Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
2803     {
2804         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2805         return branchMul32(cond, dataTempRegister, src, dest);
2806     }
2807
2808     Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2809     {
2810         ASSERT(cond != Signed);
2811
2812         // This is a signed multiply of two 64-bit values, producing a 64-bit result.
2813         m_assembler.mul<64>(dest, src1, src2);
2814
2815         if (cond != Overflow)
2816             return branchTest64(cond, dest);
2817
2818         // Compute bits 127..64 of the result into scratch1.
2819         m_assembler.smulh(scratch1, src1, src2);
2820         // Splat bit 63 of the result to bits 63..0 of scratch2.
2821         m_assembler.asr<64>(scratch2, dest, 63);
2822         // Check that bits 63..127 of the full 128-bit result were all equal.
2823         return branch64(NotEqual, scratch2, scratch1);
2824     }
2825
2826     Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2827     {
2828         return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2829     }
2830
2831     Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
2832     {
2833         return branchMul64(cond, dest, src, dest);
2834     }
2835
2836     Jump branchNeg32(ResultCondition cond, RegisterID dest)
2837     {
2838         m_assembler.neg<32, S>(dest, dest);
2839         return Jump(makeBranch(cond));
2840     }
2841
2842     Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
2843     {
2844         m_assembler.neg<64, S>(srcDest, srcDest);
2845         return Jump(makeBranch(cond));
2846     }
2847
2848     Jump branchSub32(ResultCondition cond, RegisterID dest)
2849     {
2850         m_assembler.neg<32, S>(dest, dest);
2851         return Jump(makeBranch(cond));
2852     }
2853
2854     Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2855     {
2856         m_assembler.sub<32, S>(dest, op1, op2);
2857         return Jump(makeBranch(cond));
2858     }
2859
2860     Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2861     {
2862         if (isUInt12(imm.m_value)) {
2863             m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
2864             return Jump(makeBranch(cond));
2865         }
2866         if (isUInt12(-imm.m_value)) {
2867             m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
2868             return Jump(makeBranch(cond));
2869         }
2870
2871         signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2872         return branchSub32(cond, op1, dataTempRegister, dest);
2873     }
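
         // Example of the negated-immediate path above: branchSub32(cond, op1,
         // TrustedImm32(-8), dest) cannot encode -8 as a UInt12, but -(-8) fits,
         // so we emit "adds dest, op1, #8", which computes the same result and
         // flags as subtracting -8.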
2874
2875     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2876     {
2877         return branchSub32(cond, dest, src, dest);
2878     }
2879
2880     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2881     {
2882         return branchSub32(cond, dest, imm, dest);
2883     }
2884
2885     Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2886     {
2887         m_assembler.sub<64, S>(dest, op1, op2);
2888         return Jump(makeBranch(cond));
2889     }
2890
2891     Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2892     {
2893         if (isUInt12(imm.m_value)) {
2894             m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2895             return Jump(makeBranch(cond));
2896         }
2897         if (isUInt12(-imm.m_value)) {
2898             m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2899             return Jump(makeBranch(cond));
2900         }
2901
2902         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2903         return branchSub64(cond, op1, dataTempRegister, dest);
2904     }
2905
2906     Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
2907     {
2908         return branchSub64(cond, dest, src, dest);
2909     }
2910
2911     Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2912     {
2913         return branchSub64(cond, dest, imm, dest);
2914     }
2915
2916     Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
2917     {
2918         ASSERT(isUInt12(imm.m_value));
2919         m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
2920         return Jump(makeBranch(cond));
2921     }
2922
2923
2924     // Jumps, calls, returns
2925
2926     ALWAYS_INLINE Call call()
2927     {
2928         AssemblerLabel pointerLabel = m_assembler.label();
2929         moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2930         invalidateAllTempRegisters();
2931         m_assembler.blr(dataTempRegister);
2932         AssemblerLabel callLabel = m_assembler.label();
2933         ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2934         return Call(callLabel, Call::Linkable);
2935     }
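
         // A sketch of the fixed-width sequence emitted above (dataTempRegister
         // is x16 on ARM64):
         //     movz x16, #lo16                ; <- pointerLabel
         //     movk x16, #mid16, lsl #16
         //     movk x16, #hi16, lsl #32
         //     blr  x16
         //                                    ; <- callLabel
         // The pointer load begins 4 instructions (16 bytes) before callLabel,
         // which is exactly what REPATCH_OFFSET_CALL_TO_POINTER records so the
         // callee pointer can be repatched later.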
2936
2937     ALWAYS_INLINE Call call(RegisterID target)
2938     {
2939         invalidateAllTempRegisters();
2940         m_assembler.blr(target);
2941         return Call(m_assembler.label(), Call::None);
2942     }
2943
2944     ALWAYS_INLINE Call call(Address address)
2945     {
2946         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2947         return call(dataTempRegister);
2948     }
2949
2950     ALWAYS_INLINE Jump jump()
2951     {
2952         AssemblerLabel label = m_assembler.label();
2953         m_assembler.b();
2954         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
2955     }
2956
2957     void jump(RegisterID target)
2958     {
2959         m_assembler.br(target);
2960     }
2961
2962     void jump(Address address)
2963     {
2964         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2965         m_assembler.br(dataTempRegister);
2966     }
2967     
2968     void jump(BaseIndex address)
2969     {
2970         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2971         m_assembler.br(dataTempRegister);
2972     }
2973
2974     void jump(AbsoluteAddress address)
2975     {
2976         move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
2977         load64(Address(dataTempRegister), dataTempRegister);
2978         m_assembler.br(dataTempRegister);
2979     }
2980
2981     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2982     {
2983         oldJump.link(this);
2984         return tailRecursiveCall();
2985     }
2986
2987     ALWAYS_INLINE Call nearCall()
2988     {
2989         m_assembler.bl();
2990         return Call(m_assembler.label(), Call::LinkableNear);
2991     }
2992
2993     ALWAYS_INLINE Call nearTailCall()
2994     {
2995         AssemblerLabel label = m_assembler.label();
2996         m_assembler.b();
2997         return Call(label, Call::LinkableNearTail);
2998     }
2999
3000     ALWAYS_INLINE void ret()
3001     {
3002         m_assembler.ret();
3003     }
3004
3005     ALWAYS_INLINE Call tailRecursiveCall()
3006     {
3007         // Like a normal call, but don't link.
3008         AssemblerLabel pointerLabel = m_assembler.label();
3009         moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
3010         m_assembler.br(dataTempRegister);
3011         AssemblerLabel callLabel = m_assembler.label();
3012         ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
3013         return Call(callLabel, Call::Linkable);
3014     }
3015
3016
3017     // Comparison operations
3018
3019     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
3020     {
3021         m_assembler.cmp<32>(left, right);
3022         m_assembler.cset<32>(dest, ARM64Condition(cond));
3023     }
3024
3025     void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
3026     {
3027         load32(left, getCachedDataTempRegisterIDAndInvalidate());
3028         m_assembler.cmp<32>(dataTempRegister, right);
3029         m_assembler.cset<32>(dest, ARM64Condition(cond));
3030     }
3031
3032     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
3033     {
3034         if (!right.m_value) {
3035             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
3036                 test32(*resultCondition, left, left, dest);
3037                 return;
3038             }
3039         }
3040
3041         if (isUInt12(right.m_value))
3042             m_assembler.cmp<32>(left, UInt12(right.m_value));
3043         else if (isUInt12(-right.m_value))
3044             m_assembler.cmn<32>(left, UInt12(-right.m_value));
3045         else {
3046             move(right, getCachedDataTempRegisterIDAndInvalidate());
3047             m_assembler.cmp<32>(left, dataTempRegister);
3048         }
3049         m_assembler.cset<32>(dest, ARM64Condition(cond));
3050     }
3051
3052     void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
3053     {
3054         m_assembler.cmp<64>(left, right);
3055         m_assembler.cset<32>(dest, ARM64Condition(cond));
3056     }
3057     
3058     void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
3059     {
3060         if (!right.m_value) {
3061             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
3062                 test64(*resultCondition, left, left, dest);
3063                 return;
3064             }
3065         }
3066
3067         signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
3068         m_assembler.cmp<64>(left, dataTempRegister);
3069         m_assembler.cset<32>(dest, ARM64Condition(cond));
3070     }
3071
3072     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
3073     {
3074         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
3075         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
3076         move(right8, getCachedDataTempRegisterIDAndInvalidate());
3077         compare32(cond, memoryTempRegister, dataTempRegister, dest);
3078     }
3079
3080     void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
3081     {
3082         m_assembler.tst<32>(src, mask);
3083         m_assembler.cset<32>(dest, ARM64Condition(cond));
3084     }
3085
3086     void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
3087     {
3088         test32(src, mask);
3089         m_assembler.cset<32>(dest, ARM64Condition(cond));
3090     }
3091
3092     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3093     {
3094         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
3095         test32(cond, memoryTempRegister, mask, dest);
3096     }
3097
3098     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3099     {
3100         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
3101         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate());
3102         test32(cond, memoryTempRegister, mask8, dest);
3103     }
3104
3105     void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
3106     {
3107         m_assembler.tst<64>(op1, op2);
3108         m_assembler.cset<32>(dest, ARM64Condition(cond));
3109     }
3110
3111     void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
3112     {
3113         if (mask.m_value == -1)
3114             m_assembler.tst<64>(src, src);
3115         else {
3116             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
3117             m_assembler.tst<64>(src, dataTempRegister);
3118         }
3119         m_assembler.cset<32>(dest, ARM64Condition(cond));
3120     }
3121
3122     void setCarry(RegisterID dest)
3123     {
3124         m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
3125     }
3126
3127     // Patchable operations
3128
3129     ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
3130     {
3131         DataLabel32 label(this);
3132         moveWithFixedWidth(imm, dest);
3133         return label;
3134     }
3135
3136     ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
3137     {
3138         DataLabelPtr label(this);
3139         moveWithFixedWidth(imm, dest);
3140         return label;
3141     }
3142
3143     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3144     {
3145         dataLabel = DataLabelPtr(this);
3146         moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3147         return branch64(cond, left, dataTempRegister);
3148     }
3149
3150     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3151     {
3152         dataLabel = DataLabelPtr(this);
3153         moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3154         return branch64(cond, left, dataTempRegister);
3155     }
3156
3157     ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
3158     {
3159         dataLabel = DataLabel32(this);
3160         moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
3161         return branch32(cond, left, dataTempRegister);
3162     }
3163
3164     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
3165     {
3166         m_makeJumpPatchable = true;
3167         Jump result = branch64(cond, left, TrustedImm64(right));
3168         m_makeJumpPatchable = false;
3169         return PatchableJump(result);
3170     }
3171
3172     PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
3173     {
3174         m_makeJumpPatchable = true;
3175         Jump result = branchTest32(cond, reg, mask);
3176         m_makeJumpPatchable = false;
3177         return PatchableJump(result);
3178     }
3179
3180     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
3181     {
3182         m_makeJumpPatchable = true;
3183         Jump result = branch32(cond, reg, imm);
3184         m_makeJumpPatchable = false;
3185         return PatchableJump(result);
3186     }
3187
3188     PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
3189     {
3190         m_makeJumpPatchable = true;
3191         Jump result = branch32(cond, left, imm);
3192         m_makeJumpPatchable = false;
3193         return PatchableJump(result);
3194     }
3195
3196     PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
3197     {
3198         m_makeJumpPatchable = true;
3199         Jump result = branch64(cond, reg, imm);
3200         m_makeJumpPatchable = false;
3201         return PatchableJump(result);
3202     }
3203
3204     PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
3205     {
3206         m_makeJumpPatchable = true;
3207         Jump result = branch64(cond, left, right);
3208         m_makeJumpPatchable = false;
3209         return PatchableJump(result);
3210     }
3211
3212     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
3213     {
3214         m_makeJumpPatchable = true;
3215         Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
3216         m_makeJumpPatchable = false;
3217         return PatchableJump(result);
3218     }
3219
3220     PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
3221     {
3222         m_makeJumpPatchable = true;
3223         Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
3224         m_makeJumpPatchable = false;
3225         return PatchableJump(result);
3226     }
3227
3228     PatchableJump patchableJump()
3229     {
3230         m_makeJumpPatchable = true;
3231         Jump result = jump();
3232         m_makeJumpPatchable = false;
3233         return PatchableJump(result);
3234     }
3235
3236     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
3237     {
3238         DataLabelPtr label(this);
3239         moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
3240         store64(dataTempRegister, address);
3241         return label;
3242     }
3243
3244     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
3245     {
3246         return storePtrWithPatch(TrustedImmPtr(0), address);
3247     }
3248
3249     static void reemitInitialMoveWithPatch(void* address, void* value)
3250     {
3251         ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
3252     }
3253
3254     // Miscellaneous operations:
3255
3256     void breakpoint(uint16_t imm = 0)
3257     {
3258         m_assembler.brk(imm);
3259     }
3260
3261     void nop()
3262     {
3263         m_assembler.nop();
3264     }
3265     
3266     // We take memoryFence to mean acqrel; the dmb ish emitted below has acqrel semantics on ARM64.
3267     void memoryFence()
3268     {
3269         m_assembler.dmbISH();
3270     }
3271
3272     // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST").
3273     void storeFence()
3274     {
3275         m_assembler.dmbISHST();
3276     }
3277
3278     // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this
3279     // using dependencies or half fences, but there are cases where this is as good as it gets. The only
3280     // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like
3281     // the memoryFence().
3282     void loadFence()
3283     {
3284         m_assembler.dmbISH();
3285     }
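
         // A sketch of how these fences might be used for message passing
         // between threads (hypothetical payload/flag addresses):
         //     store64(payloadReg, payloadAddress);
         //     storeFence(); // keep the payload store ahead of the flag store
         //     store32(TrustedImm32(1), flagAddress);
         // with the consumer issuing loadFence() between reading the flag and
         // reading the payload.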
3286
3287     // Misc helper functions.
3288
3289     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
3290     static RelationalCondition invert(RelationalCondition cond)
3291     {
3292         return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
3293     }
3294
3295     static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
3296     {
3297         switch (cond) {
3298         case Equal:
3299             return Zero;
3300         case NotEqual:
3301             return NonZero;
3302         case LessThan:
3303             return Signed;
3304         case GreaterThanOrEqual:
3305             return PositiveOrZero;
3307         default:
3308             return Nullopt;
3309         }
3310     }
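
         // Example: a compare against zero such as compare32(Equal, reg,
         // TrustedImm32(0), dest) commutes to test32(Zero, reg, reg, dest),
         // since "reg == 0" and "(reg & reg) is zero" are the same predicate;
         // likewise LessThan maps to Signed because "reg < 0" is exactly the
         // sign bit of reg.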
3311
3312     static FunctionPtr readCallTarget(CodeLocationCall call)
3313     {
3314         return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
3315     }
3316
3317     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
3318     {
3319         ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
3320     }
3321     
3322     static ptrdiff_t maxJumpReplacementSize()
3323     {
3324         return ARM64Assembler::maxJumpReplacementSize();
3325     }
3326
3327     static ptrdiff_t patchableJumpSize()
3328     {
3329         return ARM64Assembler::patchableJumpSize();
3330     }
3331
3332     RegisterID scratchRegisterForBlinding()
3333     {
3334         // We *do not* have a scratch register for blinding.
3335         RELEASE_ASSERT_NOT_REACHED();
3336         return getCachedDataTempRegisterIDAndInvalidate();
3337     }
3338
3339     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
3340     static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
3341     
3342     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
3343     {
3344         return label.labelAtOffset(0);
3345     }
3346     
3347     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
3348     {
3349         UNREACHABLE_FOR_PLATFORM();
3350         return CodeLocationLabel();
3351     }
3352     
3353     static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
3354     {
3355         UNREACHABLE_FOR_PLATFORM();
3356         return CodeLocationLabel();
3357     }
3358     
3359     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
3360     {
3361         reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
3362     }
3363     
3364     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
3365     {
3366         UNREACHABLE_FOR_PLATFORM();
3367     }
3368
3369     static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
3370     {
3371         UNREACHABLE_FOR_PLATFORM();
3372     }
3373
3374     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
3375     {
3376         ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
3377     }
3378
3379     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
3380     {
3381         ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
3382     }
3383
3384 #if ENABLE(MASM_PROBE)
3385     void probe(ProbeFunction, void* arg1, void* arg2);
3386 #endif // ENABLE(MASM_PROBE)
3387
3388 protected:
3389     ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
3390     {
3391         m_assembler.b_cond(cond);
3392         AssemblerLabel label = m_assembler.label();
3393         m_assembler.nop();
3394         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
3395     }
3396     ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
3397     ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
3398     ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
3399
3400     template <int dataSize>
3401     ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
3402     {
3403         if (cond == IsZero)
3404             m_assembler.cbz<dataSize>(reg);
3405         else
3406             m_assembler.cbnz<dataSize>(reg);
3407         AssemblerLabel label = m_assembler.label();
3408         m_assembler.nop();
3409         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
3410     }
3411
3412     ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
3413     {
3414         ASSERT(bit < 64);
3415         bit &= 0x3f;
3416         if (cond == IsZero)
3417             m_assembler.tbz(reg, bit);
3418         else
3419             m_assembler.tbnz(reg, bit);
3420         AssemblerLabel label = m_assembler.label();
3421         m_assembler.nop();
3422         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
3423     }
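
         // For example, a single-bit mask such as branchTest64(NonZero, reg,
         // TrustedImm32(1 << 3)) can be lowered through this helper to
         // "tbnz xN, #3, target", followed by the nop that keeps patchable
         // branches fixed-size.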
3424
3425     ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
3426     {
3427         return static_cast<ARM64Assembler::Condition>(cond);
3428     }
3429
3430     ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
3431     {
3432         return static_cast<ARM64Assembler::Condition>(cond);
3433     }
3434
3435     ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
3436     {
3437         return static_cast<ARM64Assembler::Condition>(cond);
3438     }
3439     
3440 private:
3441     ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate()
3442     {
3443         RELEASE_ASSERT(m_allowScratchRegister);
3444         return dataMemoryTempRegister().registerIDInvalidate();
3445     }
3446     ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate()
3447     {
3448         RELEASE_ASSERT(m_allowScratchRegister);
3449         return cachedMemoryTempRegister().registerIDInvalidate();
3450     }
3451     ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister()
3452     {
3453         RELEASE_ASSERT(m_allowScratchRegister);
3454         return m_dataMemoryTempRegister;
3455     }
3456     ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister()
3457     {
3458         RELEASE_ASSERT(m_allowScratchRegister);
3459         return m_cachedMemoryTempRegister;
3460     }
3461
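         // Returns true if value is representable as a signed 32-bit value,
         // i.e. it survives a sign-extension round trip through the low 32 bits.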
3462     ALWAYS_INLINE bool isInIntRange(intptr_t value)
3463     {
3464         return value == ((value << 32) >> 32);
3465     }
3466
3467     template<typename ImmediateType, typename rawType>
3468     void moveInternal(ImmediateType imm, RegisterID dest)
3469     {
3470         const int dataSize = sizeof(rawType) * 8;
3471         const int numberHalfWords = dataSize / 16;
3472         rawType value = bitwise_cast<rawType>(imm.m_value);
3473         uint16_t halfword[numberHalfWords];
3474
3475         // Handle 0 and ~0 here to simplify code below
3476         if (!value) {
3477             m_assembler.movz<dataSize>(dest, 0);
3478             return;
3479         }
3480         if (!~value) {
3481             m_assembler.movn<dataSize>(dest, 0);
3482             return;
3483         }
3484
3485         LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
3486
3487         if (logicalImm.isValid()) {
3488             m_assembler.movi<dataSize>(dest, logicalImm);
3489             return;
3490         }
3491
3492         // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
3493         int zeroOrNegateVote = 0;
3494         for (int i = 0; i < numberHalfWords; ++i) {
3495             halfword[i] = getHalfword(value, i);
3496             if (!halfword[i])
3497                 zeroOrNegateVote++;
3498             else if (halfword[i] == 0xffff)
3499                 zeroOrNegateVote--;
3500         }
3501
3502         bool needToClearRegister = true;
3503         if (zeroOrNegateVote >= 0) {
3504             for (int i = 0; i < numberHalfWords; i++) {
3505                 if (halfword[i]) {
3506                     if (needToClearRegister) {
3507                         m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
3508                         needToClearRegister = false;
3509                     } else
3510                         m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
3511                 }
3512             }
3513         } else {
3514             for (int i = 0; i < numberHalfWords; i++) {
3515                 if (halfword[i] != 0xffff) {
3516                     if (needToClearRegister) {
3517                         m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
3518                         needToClearRegister = false;
3519                     } else
3520                         m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
3521                 }
3522             }
3523         }
3524     }
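
         // A worked example of the voting scheme above: 0x0000123400005678 has
         // halfwords { 0x5678, 0x0000, 0x1234, 0x0000 }, so the zero side wins
         // and we emit
         //     movz dest, #0x5678
         //     movk dest, #0x1234, lsl #32
         // whereas 0xffffffffffff5678 is mostly 0xffff halfwords, so the movn
         // side wins and the single instruction "movn dest, #0xa987" suffices
         // (movn writes the bitwise complement of its operand).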
3525
3526     template<int datasize>
3527     ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3528     {
3529         m_assembler.ldr<datasize>(rt, rn, pimm);
3530     }
3531
3532     template<int datasize>
3533     ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3534     {
3535         m_assembler.ldur<datasize>(rt, rn, simm);
3536     }
3537
3538     template<int datasize>
3539     ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3540     {
3541         loadUnsignedImmediate<datasize>(rt, rn, pimm);
3542     }
3543
3544     template<int datasize>
3545     ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3546     {
3547         loadUnscaledImmediate<datasize>(rt, rn, simm);
3548     }
3549
3550     template<int datasize>
3551     ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3552     {
3553         m_assembler.str<datasize>(rt, rn, pimm);
3554     }
3555
3556     template<int datasize>
3557     ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3558     {
3559         m_assembler.stur<datasize>(rt, rn, simm);
3560     }
3561
3562     void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
3563     {
3564         int32_t value = imm.m_value;
3565         m_assembler.movz<32>(dest, getHalfword(value, 0));
3566         m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3567     }
3568
3569     void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
3570     {
3571         intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
3572         m_assembler.movz<64>(dest, getHalfword(value, 0));
3573         m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
3574         m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
3575     }
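
         // Note: the fixed-width pointer move above writes only halfwords 0..2,
         // relying on the top 16 bits of the address being zero (pointers that
         // fit in 48 bits); this is the 3-instruction load that
         // REPATCH_OFFSET_CALL_TO_POINTER accounts for.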
3576
3577     void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
3578     {
3579         if (value >= 0) {
3580             m_assembler.movz<32>(dest, getHalfword(value, 0));
3581             m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3582         } else {
3583             m_assembler.movn<32>(dest, ~getHalfword(value, 0));
3584             m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3585         }
3586     }
3587
3588     template<int datasize>
3589     ALWAYS_INLINE void load(const void* address, RegisterID dest)
3590     {
3591         intptr_t currentRegisterContents;
3592         if (cachedMemoryTempRegister().value(currentRegisterContents)) {
3593             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
3594             intptr_t addressDelta = addressAsInt - currentRegisterContents;
3595
3596             if (dest == memoryTempRegister)
3597                 cachedMemoryTempRegister().invalidate();
3598
3599             if (isInIntRange(addressDelta)) {
3600                 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
3601                     m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
3602                     return;
3603                 }
3604
3605                 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
3606                     m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
3607                     return;
3608                 }
3609             }
3610
3611             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
3612                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
3613                 cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
3614                 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
3615                 return;
3616             }
3617         }
3618
3619         move(TrustedImmPtr(address), memoryTempRegister);
3620         if (dest == memoryTempRegister)
3621             cachedMemoryTempRegister().invalidate();
3622         else
3623             cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
3624         m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
3625     }
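
         // Example of the caching above, with hypothetical addresses: loading
         // from 0x10000000 and then 0x10000008 materializes the first address
         // into memoryTempRegister (x17) once; the second load sees a delta of
         // 8 and emits a single "ldur dest, [x17, #8]" rather than rebuilding
         // the address.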
3626
3627     template<int datasize>
3628     ALWAYS_INLINE void store(RegisterID src, const void* address)
3629     {
3630         ASSERT(src != memoryTempRegister);
3631         intptr_t currentRegisterContents;
3632         if (cachedMemoryTempRegister().value(currentRegisterContents)) {
3633             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
3634             intptr_t addressDelta = addressAsInt - currentRegisterContents;
3635
3636             if (isInIntRange(addressDelta)) {
3637                 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
3638                     m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
3639                     return;
3640                 }
3641
3642                 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
3643                     m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
3644                     return;
3645                 }
3646             }
3647
3648             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
3649                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
3650                 cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
3651                 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
3652                 return;
3653             }
3654         }
3655
3656         move(TrustedImmPtr(address), memoryTempRegister);
3657         cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
3658         m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
3659     }
3660
3661     template <int dataSize>
3662     ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
3663     {
3664         intptr_t currentRegisterContents;
3665         if (dest.value(currentRegisterContents)) {
3666             if (currentRegisterContents == immediate)
3667                 return true;
3668
3669             LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
3670
3671             if (logicalImm.isValid()) {
3672                 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
3673                 dest.setValue(immediate);
3674                 return true;
3675             }
3676
3677             if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
3678                 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
3679                     m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
3680
3681                 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
3682                     m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
3683
3684                 dest.setValue(immediate);
3685                 return true;
3686             }
3687         }
3688
3689         return false;
3690     }
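
         // Example: if the cached register last held 0x0000000120340000 and the
         // new immediate is 0x0000000120346789, the upper word and the high
         // halfword of the low word both match, so a single
         // "movk dest, #0x6789" updates just the low 16 bits.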
3691
3692     void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
3693     {
3694         if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
3695             return;
3696
3697         moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
3698         dest.setValue(imm.m_value);
3699     }
3700
3701     void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
3702     {
3703         if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
3704             return;
3705
3706         moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
3707         dest.setValue(imm.asIntptr());
3708     }
3709
3710     void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
3711     {
3712         if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
3713             return;
3714
3715         moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
3716         dest.setValue(imm.m_value);
3717     }
3718
3719     template<int datasize>
3720     ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
3721     {
3722         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
3723             loadUnscaledImmediate<datasize>(rt, rn, offset);
3724             return true;
3725         }
3726         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
3727             loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
3728             return true;
3729         }
3730         return false;
3731     }
3732
3733     template<int datasize>
3734     ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
3735     {
3736         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
3737             loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
3738             return true;
3739         }
3740         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
3741             loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
3742             return true;
3743         }
3744         return false;
3745     }
3746
3747     template<int datasize>
3748     ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
3749     {
3750         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
3751             m_assembler.ldur<datasize>(rt, rn, offset);
3752             return true;
3753         }
3754         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
3755             m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
3756             return true;
3757         }
3758         return false;
3759     }
3760
3761     template<int datasize>
3762     ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
3763     {
3764         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
3765             storeUnscaledImmediate<datasize>(rt, rn, offset);
3766             return true;
3767         }
3768         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
3769             storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
3770             return true;
3771         }
3772         return false;
3773     }
3774
3775     template<int datasize>
3776     ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
3777     {
3778         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
3779             m_assembler.stur<datasize>(rt, rn, offset);
3780             return true;
3781         }
3782         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
3783             m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
3784             return true;
3785         }
3786         return false;
3787     }
3788
3789     Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
3790     {
3791         if (cond == DoubleNotEqual) {
3792             // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
3793             Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
3794             Jump result = makeBranch(ARM64Assembler::ConditionNE);
3795             unordered.link(this);
3796             return result;
3797         }
3798         if (cond == DoubleEqualOrUnordered) {
3799             Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
3800             Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
3801             unordered.link(this);
3802             // We get here if either unordered or equal.
3803             Jump result = jump();
3804             notEqual.link(this);
3805             return result;
3806         }
3807         return makeBranch(cond);
3808     }
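
         // Example: for branchDouble(DoubleNotEqual, a, b) with a = NaN, fcmp
         // flags the comparison as unordered (V set), so the ConditionVS guard
         // above is taken and skips past the ConditionNE branch; the jump is
         // taken only when the operands are ordered and not equal, which is
         // what DoubleNotEqual means.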
3809
3810     friend class LinkBuffer;
3811
3812     static void linkCall(void* code, Call call, FunctionPtr function)
3813     {