277c0522bd747625a94e0b62af85ab379678749b
[WebKit-https.git] / Source / JavaScriptCore / assembler / MacroAssemblerARM64.h
1 /*
2  * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #ifndef MacroAssemblerARM64_h
27 #define MacroAssemblerARM64_h
28
29 #if ENABLE(ASSEMBLER)
30
31 #include "ARM64Assembler.h"
32 #include "AbstractMacroAssembler.h"
33 #include <wtf/MathExtras.h>
34 #include <wtf/Optional.h>
35
36 namespace JSC {
37
38 class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
39 public:
    // Full architectural register counts (x0-x30 plus sp/zr; q0-q31).
    static const unsigned numGPRs = 32;
    static const unsigned numFPRs = 32;

    // ip0/ip1 are the AAPCS64 intra-procedure-call scratch registers; this
    // macro assembler reserves them as its data / memory temporaries.
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;

    // Hands out the data temp as a generic scratch register. Goes through the
    // invalidating accessor so any cached constant held in it is forgotten.
    RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return getCachedDataTempRegisterIDAndInvalidate();
    }

private:
    // q31 is reserved as the floating-point temporary.
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
61     
public:
    // Starts with both temp-register caches attached to their reserved
    // registers and jumps in non-patchable (compactable) mode.
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }
69
    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    // Thin forwarding layer used by the branch-compaction linker; all real
    // work is delegated to ARM64Assembler.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }

    // Pointers are 8 bytes on ARM64.
    static const Scale ScalePtr = TimesEight;
87
88     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
89     {
90         // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
91         return !(value & ~0x3ff8);
92     }
93
    // Conditions for integer comparisons. Above/Below variants are unsigned;
    // GreaterThan/LessThan variants are signed.
    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    // Conditions tested on the flags set by a flag-setting ALU operation.
    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    // Conditions tested after an fcmp. The LO/LS (rather than LT/LE) choices
    // make the ordered comparisons evaluate false when either input is NaN.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
136
    // ABI-designated special registers used by the shared macro-assembler layer.
    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;
140
141     // FIXME: Get reasonable implementations for these
142     static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
143     static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
144
    // Integer operations:

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        // sp is not encodable as a source of the register-register add form.
        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
        m_assembler.add<32>(dest, a, b);
    }

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    // dest = src + imm. Uses the 12-bit unsigned add/sub immediate form when
    // imm (or -imm) fits; otherwise materializes imm in the data temp.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }

    // In-memory add: load, adjust, store back. May clobber both temps.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }
217
    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        // At most one operand may be sp, and the encoding only accepts sp in
        // the first source position — swap it there when needed.
        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
        if (b == ARM64Registers::sp)
            std::swap(a, b);
        m_assembler.add<64>(dest, a, b);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        // Keep sp in the first source position (see above).
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    // dest += imm, with imm sign-extended to 64 bits when it has to be
    // materialized in a register.
    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    // In-memory 64-bit add: load, adjust, store back. May clobber both temps.
    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    // Pointer-sized add without touching the flags (add64 never sets flags here).
    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }
334
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    // Uses an ARM64 bitmask ("logical") immediate when imm is encodable as
    // one; otherwise materializes imm in the data temp register.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
362
363     void and32(Address src, RegisterID dest)
364     {
365         load32(src, dataTempRegister);
366         and32(dataTempRegister, dest);
367     }
368
    void and64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.and_<64>(dest, src1, src2);
    }

    // Uses a 64-bit bitmask immediate when encodable; otherwise materializes
    // the constant in the data temp register.
    void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, src, dataTempRegister);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    // 32-bit immediate is sign-extended to 64 bits before the mask is formed.
    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }
417     
    // clz: count of leading zero bits in the 32-/64-bit source.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void countLeadingZeros64(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<64>(dest, src);
    }
427
    // Logical shift left. Immediate shift amounts are masked to the operand
    // width (0x1f for 32-bit, 0x3f for 64-bit), matching hardware behavior.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }
467
    void mul32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<32>(dest, left, right);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    // ARM64 has no multiply-by-immediate form; materialize imm first.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul64(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<64>(dest, left, right);
    }

    // dest = mulLeft * mulRight + summand (fused madd).
    void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
    }

    // dest = minuend - mulLeft * mulRight (fused msub).
    void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
    }

    // dest = -(mulLeft * mulRight), via msub against the zero register.
    void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
    }

    void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
    }

    void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    // Signed division (sdiv).
    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<32>(dest, dividend, divisor);
    }

    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<64>(dest, dividend, divisor);
    }

    // Two's-complement negation in place.
    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }
543
    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    // Uses a bitmask ("logical") immediate when encodable; otherwise
    // materializes imm in the data temp register.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        // The materialized constant would clobber src if they aliased.
        ASSERT(src != dataTempRegister);
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    // In-memory or: load, combine, store back.
    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
        if (logicalImm.isValid()) {
            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
            store32(dataTempRegister, address.m_ptr);
        } else {
            // Non-encodable imm: load into memory temp so the or32 helper can
            // use the data temp for the constant.
            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
            store32(dataTempRegister, address.m_ptr);
        }
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    // 32-bit immediate is sign-extended to 64 bits before the mask is formed.
    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }
654
    // Rotate right; the rotate amount is masked to 0..63.
    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    // rshift = arithmetic (sign-propagating) shift right. Immediate amounts
    // are masked to the operand width, matching hardware behavior.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }
699
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    // dest -= imm. Uses the 12-bit immediate form when imm (or -imm) fits;
    // otherwise materializes imm in the data temp.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    // In-memory subtract: load, adjust, store back. May clobber both temps.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(RegisterID a, RegisterID b, RegisterID dest)
    {
        m_assembler.sub<64>(dest, a, b);
    }

    // 32-bit immediate is sign-extended to 64 bits when materialized.
    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }
804
    // urshift = logical (zero-filling) shift right. Immediate amounts are
    // masked to the operand width, matching hardware behavior.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }

    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    void urshift64(RegisterID shiftAmount, RegisterID dest)
    {
        urshift64(dest, shiftAmount, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        urshift64(dest, imm, dest);
    }
844
845     void xor32(RegisterID src, RegisterID dest)
846     {
847         xor32(dest, src, dest);
848     }
849
850     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
851     {
852         m_assembler.eor<32>(dest, op1, op2);
853     }
854
855     void xor32(TrustedImm32 imm, RegisterID dest)
856     {
857         xor32(imm, dest, dest);
858     }
859
860     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
861     {
862         if (imm.m_value == -1)
863             m_assembler.mvn<32>(dest, src);
864         else {
865             LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
866
867             if (logicalImm.isValid()) {
868                 m_assembler.eor<32>(dest, src, logicalImm);
869                 return;
870             }
871
872             move(imm, getCachedDataTempRegisterIDAndInvalidate());
873             m_assembler.eor<32>(dest, src, dataTempRegister);
874         }
875     }
876
    // [address] ^= src: a 64-bit read-modify-write through the data temp register.
    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }
883
    // dest ^= src (64-bit).
    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    // dest = op1 ^ op2 (64-bit EOR).
    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    // dest ^= imm (64-bit; the 32-bit immediate is widened by the 3-operand overload).
    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }
898
899     void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
900     {
901         if (imm.m_value == -1)
902             m_assembler.mvn<64>(dest, src);
903         else {
904             LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
905
906             if (logicalImm.isValid()) {
907                 m_assembler.eor<64>(dest, src, logicalImm);
908                 return;
909             }
910
911             move(imm, getCachedDataTempRegisterIDAndInvalidate());
912             m_assembler.eor<64>(dest, src, dataTempRegister);
913         }
914     }
915
916     void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
917     {
918         if (imm.m_value == -1)
919             m_assembler.mvn<64>(dest, src);
920         else {
921             LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
922
923             if (logicalImm.isValid()) {
924                 m_assembler.eor<64>(dest, src, logicalImm);
925                 return;
926             }
927
928             signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
929             m_assembler.eor<64>(dest, src, dataTempRegister);
930         }
931     }
932
    // dest = ~src (32-bit).
    void not32(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<32>(dest, src);
    }

    // dest = ~src (64-bit).
    void not64(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<64>(dest, src);
    }
942
    // Memory access operations:

    // Load 64 bits from base+offset. Fast path encodes the offset directly in
    // the LDR; otherwise the offset is materialized in the memory temp
    // register and used as a register index.
    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
953
    // Load 64 bits from base+index*(2^scale)+offset. Fast path uses a
    // register-indexed LDR when there is no offset and the scale is 0 or 3
    // (the shift amounts the 64-bit LDR can encode); otherwise the effective
    // index is computed in the memory temp register.
    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
965
    // Load 64 bits from an absolute address.
    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    // Post-indexed load: dest = [src]; then src += simm.
    void load64(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldr<64>(dest, src, simm);
    }
975
    // Load 64 bits from base+offset where the 32-bit offset is emitted with a
    // fixed-width move sequence so it can be repatched later. Returns a label
    // marking the patchable offset material.
    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
    
    // Compact (single-instruction) patchable 64-bit load; the offset must
    // satisfy isCompactPtrAlignedAddressOffset.
    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }
991
    // Load a pair of 64-bit values from [src] into dest1/dest2 (LDP).
    void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64(src, TrustedImm32(0), dest1, dest2);
    }

    // LDP from [src + offset].
    void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
    }

    // Non-temporal pair load (LDNP) from [src].
    void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
    }

    // Non-temporal pair load (LDNP) from [src + offset].
    void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
    }
1011
    // Trap with the abort reason parked in the data temp register so it is
    // visible in the register state at the breakpoint.
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    // As above, with an extra word of context parked in the memory temp register.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }
1023
    // Emit a pointer-sized load whose instruction can later be converted in
    // place; the offset must fit the unsigned, 8-byte-scaled LDR immediate
    // form (hence the 0xff8 mask assertion).
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }
1031
    // Load 32 bits (zero-extended) from base+offset.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // Load 32 bits from base+index*(2^scale)+offset; fast path when there is
    // no offset and the scale is 0 or 2 (what the 32-bit LDR can encode).
    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // Load 32 bits from an absolute address.
    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }
1057
    // Patchable 32-bit load; the fixed-width offset move can be repatched later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
    
    // Compact (single-instruction) patchable 32-bit load.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    // Implemented as a plain load32 on this backend.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
1078
    // Load 16 bits (zero-extended) from base+offset.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }
    
    // Load 16 bits from base+index*(2^scale)+offset; fast path for scale 0 or 1.
    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }
    
    // Implemented as a plain load16 on this backend.
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
1104
    // Load 16 bits from base+offset, sign-extended to 32 bits (LDRSH).
    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    // Load 16 bits from base+index*(2^scale)+offset, sign-extended to 32 bits.
    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }
1125
    // dest = zero-extension of the low 16 bits of src (UXTH).
    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxth<32>(dest, src);
    }

    // dest = sign-extension of the low 16 bits of src (SXTH).
    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxth<32>(dest, src);
    }
1135
    // Load 8 bits (zero-extended) from base+offset.
    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    // Load 8 bits from base+index+offset (byte accesses take no scale).
    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }
    
    // Load 8 bits from an absolute address.
    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        // If dest aliases the temp, the cached address value was clobbered.
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }

    // Post-indexed byte load: dest = [src]; then src += simm.
    void load8(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldrb(dest, src, simm);
    }
1169
    // Load 8 bits from base+offset, sign-extended to 32 bits (LDRSB).
    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    // Load 8 bits from base+index+offset, sign-extended to 32 bits.
    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }
1190
    // dest = zero-extension of the low 8 bits of src (UXTB).
    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtb<32>(dest, src);
    }

    // dest = sign-extension of the low 8 bits of src (SXTB).
    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtb<32>(dest, src);
    }
1200
    // Store 64 bits to base+offset. Fast path encodes the offset in the STR;
    // otherwise the offset is materialized in the memory temp register.
    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Store 64 bits to base+index*(2^scale)+offset; fast path for scale 0 or 3.
    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }
    
    // Store 64 bits to an absolute address.
    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }
1226
1227     void store64(TrustedImm32 imm, ImplicitAddress address)
1228     {
1229         store64(TrustedImm64(imm.m_value), address);
1230     }
1231
1232     void store64(TrustedImm64 imm, ImplicitAddress address)
1233     {
1234         if (!imm.m_value) {
1235             store64(ARM64Registers::zr, address);
1236             return;
1237         }
1238
1239         moveToCachedReg(imm, dataMemoryTempRegister());
1240         store64(dataTempRegister, address);
1241     }
1242
1243     void store64(TrustedImm64 imm, BaseIndex address)
1244     {
1245         if (!imm.m_value) {
1246             store64(ARM64Registers::zr, address);
1247             return;
1248         }
1249
1250         moveToCachedReg(imm, dataMemoryTempRegister());
1251         store64(dataTempRegister, address);
1252     }
1253
    // Post-indexed store: [dest] = src; then dest += simm.
    void store64(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.str<64>(src, dest, simm);
    }
    
    // Patchable 64-bit store; the fixed-width offset move can be repatched later.
    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
1266
    // Store a pair of 64-bit values src1/src2 to [dest] (STP).
    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64(src1, src2, dest, TrustedImm32(0));
    }

    // STP to [dest + offset].
    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
    }

    // Non-temporal pair store (STNP) to [dest].
    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
    }

    // Non-temporal pair store (STNP) to [dest + offset].
    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
    }
1286
    // Store 32 bits to base+offset.
    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // Store 32 bits to base+index*(2^scale)+offset; fast path for scale 0 or 2.
    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // Store 32 bits to an absolute address.
    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }
1312
1313     void store32(TrustedImm32 imm, ImplicitAddress address)
1314     {
1315         if (!imm.m_value) {
1316             store32(ARM64Registers::zr, address);
1317             return;
1318         }
1319
1320         moveToCachedReg(imm, dataMemoryTempRegister());
1321         store32(dataTempRegister, address);
1322     }
1323
1324     void store32(TrustedImm32 imm, BaseIndex address)
1325     {
1326         if (!imm.m_value) {
1327             store32(ARM64Registers::zr, address);
1328             return;
1329         }
1330
1331         moveToCachedReg(imm, dataMemoryTempRegister());
1332         store32(dataTempRegister, address);
1333     }
1334
1335     void store32(TrustedImm32 imm, const void* address)
1336     {
1337         if (!imm.m_value) {
1338             store32(ARM64Registers::zr, address);
1339             return;
1340         }
1341
1342         moveToCachedReg(imm, dataMemoryTempRegister());
1343         store32(dataTempRegister, address);
1344     }
1345
    // Store a zero 32-bit word directly from the zero register.
    void storeZero32(ImplicitAddress address)
    {
        store32(ARM64Registers::zr, address);
    }

    void storeZero32(BaseIndex address)
    {
        store32(ARM64Registers::zr, address);
    }
1355
    // Patchable 32-bit store; the fixed-width offset move can be repatched later.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
1363
    // Store 16 bits to base+offset.
    void store16(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<16>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    // Store 16 bits to base+index*(2^scale)+offset; fast path for scale 0 or 1.
    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }
1384
    // Store 8 bits to base+index+offset (byte accesses take no scale).
    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    // Store 8 bits to an absolute address.
    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    // Store 8 bits to base+offset.
    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, address.base, memoryTempRegister);
    }
1411
1412     void store8(TrustedImm32 imm, void* address)
1413     {
1414         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
1415         if (!imm8.m_value) {
1416             store8(ARM64Registers::zr, address);
1417             return;
1418         }
1419
1420         move(imm8, getCachedDataTempRegisterIDAndInvalidate());
1421         store8(dataTempRegister, address);
1422     }
1423
1424     void store8(TrustedImm32 imm, ImplicitAddress address)
1425     {
1426         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
1427         if (!imm8.m_value) {
1428             store8(ARM64Registers::zr, address);
1429             return;
1430         }
1431
1432         move(imm8, getCachedDataTempRegisterIDAndInvalidate());
1433         store8(dataTempRegister, address);
1434     }
1435
    // Post-indexed byte store: [dest] = src; then dest += simm.
    void store8(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.strb(src, dest, simm);
    }
1440
    // Floating-point operations:

    // Capability queries used by the shared macroassembler layer; this
    // backend unconditionally supports all of them.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
    static bool supportsFloatingPointRounding() { return true; }

    // Selects whether a truncation branch is taken on failure or on success.
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1450
    // dest = |src| (double precision, FABS).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    // dest = |src| (single precision, FABS).
    void absFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<32>(dest, src);
    }
1460
    // dest += src (double precision).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    // dest = op1 + op2 (double precision, FADD).
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    // dest += [src] (double loaded through the FP temp register).
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest += [address] (double at an absolute address).
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest = op1 + op2 (single precision, FADD).
    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<32>(dest, op1, op2);
    }
1487
    // dest = ceil(src): round toward +infinity (FRINTP, double).
    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    // dest = ceil(src): round toward +infinity (FRINTP, float).
    void ceilFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<32>(dest, src);
    }

    // dest = floor(src): round toward -infinity (FRINTM, double).
    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // dest = floor(src): round toward -infinity (FRINTM, float).
    void floorFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<32>(dest, src);
    }

    // dest = trunc(src): round toward zero (FRINTZ, double).
    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<64>(dest, src);
    }

    // dest = trunc(src): round toward zero (FRINTZ, float).
    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<32>(dest, src);
    }
1517
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        // Round-to-nearest signed conversion (FCVTNS).
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // Test for negative zero.
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
            // Inspect the sign bit of the original double: set means -0.0.
            m_assembler.fmov<64>(scratch, src);
            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
            valueIsNonZero.link(this);
        }
    }
1539
    // Compare two doubles (FCMP) and branch on the given FP condition.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }

    // Compare two floats (FCMP) and branch on the given FP condition.
    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<32>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }
1551
    // Branch if reg is a non-zero, non-NaN double. Compares against 0.0; the
    // unordered (NaN) case is peeled off first so it falls through.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg is zero or NaN (the complement of branchDoubleNonZero).
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
1572
    // Truncate 'src' toward zero and put the low 32 bits in dest; branch when
    // the truncation failed (or succeeded, per branchType) to fit in 32 bits.
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check the low 32-bits sign extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
1582
    // Narrow a double to a float (FCVT).
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    // Widen a float to a double (FCVT).
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }
1592     
    // dest = (double)imm, materializing the immediate in the data temp register.
    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }
    
    // dest = (double)src (signed 32-bit to double, SCVTF).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    // dest = (double) the 32-bit value at base+offset.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // dest = (double) the 32-bit value at an absolute address.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }
1615
    // dest = (float)src (signed 32-bit to float, SCVTF).
    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<32, 32>(dest, src);
    }
    
    // dest = (double)src (signed 64-bit to double, SCVTF).
    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    // dest = (float)src (signed 64-bit to float, SCVTF).
    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<32, 64>(dest, src);
    }
1630     
    // dest /= src (double precision).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    // dest = op1 / op2 (double precision, FDIV).
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    // dest = op1 / op2 (single precision, FDIV).
    void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<32>(dest, op1, op2);
    }
1645
    // Load a double from base+offset into an FP register.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Load a double from base+index*(2^scale)+offset; fast path for scale 0 or 3.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
    
    // Load a double from an absolute address.
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, cachedMemoryTempRegister());
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }
1672
    // Load a float from base+offset into an FP register.
    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // Load a float from base+index*(2^scale)+offset; fast path for scale 0 or 2.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }
1693
1694     void moveDouble(FPRegisterID src, FPRegisterID dest)
1695     {
1696         m_assembler.fmov<64>(dest, src);
1697     }
1698
1699     void moveZeroToDouble(FPRegisterID reg)
1700     {
1701         m_assembler.fmov<64>(reg, ARM64Registers::zr);
1702     }
1703
    // Transfers a double's raw 64-bit pattern into a general-purpose register.
    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }
1708
    // Transfers a float's raw 32-bit pattern into a general-purpose register.
    void moveFloatTo32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }
1713
    // Transfers a GPR's raw 64-bit pattern into an FP register as a double.
    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }
1718
    // Transfers a GPR's low 32-bit pattern into an FP register as a float.
    void move32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }
1723
    // Compares left/right as doubles, then conditionally selects src into dest
    // (dest is left unchanged when the condition fails).
    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }
1729
    // Compares left/right as doubles, then selects thenCase or elseCase into dest.
    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
1735
    // Compares left/right as floats; the select still operates on full 64-bit GPRs.
    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }
1741
    // Compares left/right as floats, then selects thenCase or elseCase into dest.
    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
1747
    // Emits the conditional select following an fcmp. The ARM condition codes
    // cannot directly express DoubleNotEqual / DoubleEqualOrUnordered (NaN
    // comparisons set the V flag), so those two get special sequences.
    template<int datasize>
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            // Skip the select entirely when the compare was unordered (VS):
            // NaN != NaN must NOT move src.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, src is copied to dest and the
            // next csel has all arguments equal to src.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
            return;
        }
        m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
    }
1768
    // Two-source variant: selects thenCase or elseCase into dest after an fcmp.
    // DoubleNotEqual / DoubleEqualOrUnordered need extra handling because NaN
    // compares report via the V flag (see the src/dest overload above).
    template<int datasize>
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            // Unordered: fall through to the NE select's else path is wrong,
            // so branch around the select; dest keeps its prior value.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, thenCase is copied to elseCase and the
            // next csel has all arguments equal to thenCase.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            // NOTE(review): the first csel clobbers elseCase in the unordered case.
            m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
            return;
        }
        m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
1789
    // FP-destination variant of the select-after-fcmp helper; identical
    // structure to the GPR version but uses fcsel.
    template<int datasize>
    void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            // Unordered compares must not select thenCase; skip the fcsel.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, thenCase is copied to elseCase and the
            // next csel has all arguments equal to thenCase.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            // NOTE(review): the first fcsel clobbers elseCase in the unordered case.
            m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
            return;
        }
        m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
1810
    // Double compare, double select: dest = cond(left, right) ? thenCase : elseCase.
    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
1816
    // Float compare, but the selected values are doubles (64-bit fcsel).
    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
1822
    // dest *= src (double).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }
1827
    // dest = op1 * op2 (double).
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }
1832
    // dest *= [src] (double loaded via fpTempRegister).
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }
1838
    // dest = op1 * op2 (float).
    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<32>(dest, op1, op2);
    }
1843
    // Bitwise AND of the two FP registers' 64-bit patterns.
    void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vand<64>(dest, op1, op2);
    }
1848
    // Bitwise AND is width-agnostic on the vector unit, so reuse the double form.
    void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        andDouble(op1, op2, dest);
    }
1853
    // dest = -src (double).
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }
1858
    // dest = -src (float).
    void negateFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<32>(dest, src);
    }
1863
    // dest = sqrt(src) (double).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }
1868
    // dest = sqrt(src) (float).
    void sqrtFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<32>(dest, src);
    }
1873
    // Stores a double to base + offset, materializing the offset in
    // memoryTempRegister when it cannot be encoded in the store.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }
1882
    // Stores a double to an absolute address held in memoryTempRegister.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        moveToCachedReg(address, cachedMemoryTempRegister());
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }
1888
    // Stores a double to base + (index << scale) + offset. The fast path
    // requires no offset and an encodable scale (0, or 3 for 64-bit accesses).
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }
1900
    // Stores a 32-bit float to base + offset, with the usual temp fallback.
    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }
1909     
    // Stores a 32-bit float to base + (index << scale) + offset; fast path
    // needs no offset and an encodable scale (0, or 2 for 32-bit accesses).
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }
1921
    // dest = dest - src (double).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }
1926
    // dest = op1 - op2 (double).
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }
1931
    // dest = dest - [src] (double loaded via fpTempRegister).
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }
1937
    // dest = op1 - op2 (float).
    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<32>(dest, op1, op2);
    }
1942
1943     // Result is undefined if the value is outside of the integer range.
    // Converts double -> int32 with truncation toward zero (fcvtzs).
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }
1948
    // Converts double -> uint32 with truncation toward zero (fcvtzu).
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }
1953
1954
1955     // Stack manipulation operations:
1956     //
1957     // The ABI is assumed to provide a stack abstraction to memory,
1958     // containing machine word sized units of data. Push and pop
1959     // operations add and remove a single register sized unit of data
1960     // to or from the stack. These operations are not supported on
1961     // ARM64. Peek and poke operations read or write values on the
1962     // stack, without moving the current stack position. Additionally,
1963     // there are popToRestore and pushToSave operations, which are
1964     // designed just for quick-and-dirty saving and restoring of
1965     // temporary values. These operations don't claim to have any
1966     // ABI compatibility.
1967     
    // Single-register pop is unsupported on ARM64 (see the comment above).
    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
1972
    // Single-register push is unsupported on ARM64.
    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
1977
    // Push of a memory operand is unsupported on ARM64.
    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
1982
    // Push of an immediate is unsupported on ARM64.
    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
1987
    // Pops two 64-bit registers as one 16-byte unit (ldp with post-increment).
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
    }
1992
    // Pushes two 64-bit registers as one 16-byte unit (stp with pre-decrement).
    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
    }
1997
    // Restores one register from a 16-byte stack slot (keeps SP 16-byte aligned).
    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }
2002
    // Saves one register into a 16-byte stack slot (keeps SP 16-byte aligned).
    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }
2007     
    // Pushes imm into a fresh 16-byte slot while leaving every register's final
    // value unchanged: the pair-push reserves the slot and parks reg's value at
    // [sp+8]; reg is then used to store imm at [sp] and finally reloaded from
    // [sp+8], restoring its original value.
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        RegisterID reg = dataTempRegister;
        pushPair(reg, reg);
        move(imm, reg);
        store64(reg, stackPointerRegister);
        load64(Address(stackPointerRegister, 8), reg);
    }
2016
    // Loads a 32-bit value from memory into dataTempRegister and saves it.
    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }
2022
    // Materializes imm in dataTempRegister and saves it.
    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }
2028     
    // Restores a double from the top 16-byte slot, then releases the slot.
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }
2034     
    // Reserves a 16-byte slot and stores a double into it.
    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
2040
    static ptrdiff_t pushToSaveByteOffset() { return 16; } // Each save slot is 16 bytes (SP alignment).
2042
2043     // Register move operations:
2044
    // Register move; elided when src and dest are the same register.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }
2050
    // Materializes a 32-bit immediate into dest.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }
2055
    // Materializes a pointer-sized immediate into dest.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }
2060
    // Materializes a 64-bit immediate into dest.
    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }
2065
    // Exchanges reg1 and reg2 using dataTempRegister as scratch.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }
2072
    // Sign-extension of an immediate happens at assembly time; just move the
    // widened value.
    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
    }
2077     
    // Sign-extends the low 32 bits of src into the full 64-bit dest.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }
2082
    // Zero-extends the low 32 bits of src into the full 64-bit dest.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }
2087
    // 32-bit compare, then conditionally select src into dest.
    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
    }
2093
    // 32-bit compare, then select thenCase or elseCase into dest.
    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2099
2100     void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
2101     {
2102         if (!right.m_value) {
2103             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
2104                 moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
2105                 return;
2106             }
2107         }
2108
2109         if (isUInt12(right.m_value))
2110             m_assembler.cmp<32>(left, UInt12(right.m_value));
2111         else if (isUInt12(-right.m_value))
2112             m_assembler.cmn<32>(left, UInt12(-right.m_value));
2113         else {
2114             moveToCachedReg(right, dataMemoryTempRegister());
2115             m_assembler.cmp<32>(left, dataTempRegister);
2116         }
2117         m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
2118     }
2119
    // 64-bit compare, then conditionally select src into dest.
    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }
2125
    // 64-bit compare, then select thenCase or elseCase into dest.
    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2131
    // 64-bit compare against a 32-bit immediate, then select. Zero compares are
    // strength-reduced to tests; small immediates use cmp/cmn, others are
    // materialized in the data temp register.
    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2151
    // 32-bit AND-test, then conditionally select src into dest.
    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.tst<32>(testReg, mask);
        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
    }
2157
    // 32-bit AND-test, then select thenCase or elseCase into dest.
    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.tst<32>(left, right);
        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2163
    // 32-bit test against an immediate mask, then select.
    void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        test32(left, right);
        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2169
    // 64-bit AND-test, then conditionally select src into dest.
    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.tst<64>(testReg, mask);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }
2175
    // 64-bit AND-test, then select thenCase or elseCase into dest.
    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.tst<64>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2181
2182     void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
2183     {
2184         m_assembler.cmp<32>(left, right);
2185         m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
2186     }
2187
    // 32-bit integer compare against an immediate, then select between two
    // double values. Zero compares are strength-reduced to tests.
    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2207
    // 64-bit integer compare, then select between two double values.
    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2213
    // 64-bit integer compare against a 32-bit immediate, then select between
    // two double values. Zero compares are strength-reduced to tests.
    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2233
    // 32-bit AND-test, then select between two double values.
    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.tst<32>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2239
    // 32-bit test against an immediate mask, then select between two doubles.
    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        test32(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2245
    // 64-bit AND-test, then select between two double values.
    void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.tst<64>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2251
2252     // Forwards / external control flow operations:
2253     //
2254     // This set of jump and conditional branch operations return a Jump
2255     // object which may linked at a later point, allow forwards jump,
2256     // or jumps that will require external linkage (after the code has been
2257     // relocated).
2258     //
2259     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
2260     // respecitvely, for unsigned comparisons the names b, a, be, and ae are
2261     // used (representing the names 'below' and 'above').
2262     //
2263     // Operands to the comparision are provided in the expected order, e.g.
2264     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
2265     // treated as a signed 32bit value, is less than or equal to 5.
2266     //
2267     // jz and jnz test whether the first operand is equal to zero, and take
2268     // an optional second operand of a mask under which to perform the test.
2269
    // Branch on a 32-bit register/register comparison.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }
2275
    // Branch on a 32-bit compare against an immediate. Zero compares are
    // strength-reduced to tests; uint12 immediates (or their negation, via cmn)
    // are encoded directly, anything else goes through the data temp register.
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest32(*resultCondition, left, left);
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2293
    // Branch on left vs. a 32-bit value loaded from memory.
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }
2299
    // Branch on a 32-bit value loaded from memory vs. right.
    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
2305
    // Branch on a loaded 32-bit value vs. an immediate.
    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
2311
    // Branch on a base+index-loaded 32-bit value vs. an immediate.
    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
2317
    // Branch on an absolute-address 32-bit load vs. right. Uses the data temp
    // register so the memory temp stays free for the address materialization.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }
2323
    // Branch on an absolute-address 32-bit load vs. an immediate.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
2329
    // Branch on a 64-bit register/register comparison. CMP can only encode SP
    // as its left operand, so an SP right operand is either swapped into the
    // left position (legal for Equal, which is symmetric) or copied into the
    // data temp register first.
    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        if (right == ARM64Registers::sp) {
            if (cond == Equal && left != ARM64Registers::sp) {
                // CMP can only use SP for the left argument, since we are testing for equality, the order
                // does not matter here.
                std::swap(left, right);
            } else {
                move(right, getCachedDataTempRegisterIDAndInvalidate());
                right = dataTempRegister;
            }
        }
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }
2345
    // Branch on a 64-bit compare against a 32-bit immediate. Same encoding
    // strategy as the 32-bit variant: test for zero, cmp/cmn for uint12,
    // temp register otherwise.
    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2363
    // Branch on a 64-bit compare against a 64-bit immediate; same encoding
    // strategy as above, with the uint12 checks done on the full value.
    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (!immediate) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }

        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2382
    // Branch on left vs. a 64-bit value loaded from memory.
    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }
2388
    // Branch on an absolute-address 64-bit load vs. right.
    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }
2394
    // Branch on a 64-bit value loaded from memory vs. right.
    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }
2400
    // Branch on a loaded 64-bit value vs. a 64-bit immediate.
    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }
2406
    // Pointer-width (64-bit) branch on a base+index-loaded value vs. right.
    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }
2412
    // Branch on a loaded byte vs. an immediate; the immediate is truncated to
    // int8 so both sides agree on signedness/width before the 32-bit compare.
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }
2419
    // Base+index variant of the byte-compare branch above.
    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }
2426     
    // Absolute-address variant of the byte-compare branch above.
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
        load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }
2433     
    // Branch on a 32-bit AND-test. When the register is tested against itself
    // for (non-)zero, a fused compare-and-branch (cbz/cbnz-style) is emitted
    // instead of tst + b.cond.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        if (reg == mask && (cond == Zero || cond == NonZero))
            return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }
2441
    // Sets the flags for a 32-bit AND-test of reg against mask. A mask of -1
    // tests reg against itself; otherwise the mask is encoded as a logical
    // immediate if possible, falling back to the data temp register.
    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(reg, reg);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

            if (logicalImm.isValid())
                m_assembler.tst<32>(reg, logicalImm);
            else {
                move(mask, getCachedDataTempRegisterIDAndInvalidate());
                m_assembler.tst<32>(reg, dataTempRegister);
            }
        }
    }
2457
    // Branch on already-set flags; no comparison is emitted here.
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }
2462
    // Branch on a 32-bit test against an immediate mask, with two fused fast
    // paths for (non-)zero conditions: compare-and-branch when the mask is -1,
    // and test-bit-and-branch when the mask has exactly one bit set.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            // General case: encode the mask as a logical immediate if possible.
            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
            if (logicalImm.isValid()) {
                m_assembler.tst<32>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2483
2484     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2485     {
2486         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2487         return branchTest32(cond, memoryTempRegister, mask);
2488     }
2489
2490     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2491     {
2492         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2493         return branchTest32(cond, memoryTempRegister, mask);
2494     }
2495
    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        // Self-test for (non-)zero collapses to a single cbz/cbnz.
        if (reg == mask && (cond == Zero || cond == NonZero))
            return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }
2503
    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            // All-bits mask: zero/non-zero is a single cbz/cbnz.
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            // Single-bit mask: use tbz/tbnz on that bit.
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

            if (logicalImm.isValid()) {
                m_assembler.tst<64>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            // The 32-bit mask is used as a 64-bit operand, so sign-extend it.
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2525
    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
    {
        if (mask.m_value == -1) {
            // All-bits mask: zero/non-zero is a single cbz/cbnz.
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            // Single-bit mask: use tbz/tbnz on that bit.
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

            if (logicalImm.isValid()) {
                m_assembler.tst<64>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            // Full 64-bit immediate: materialize it in the data temp register.
            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
2547
2548     Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
2549     {
2550         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2551         return branchTest64(cond, dataTempRegister, mask);
2552     }
2553
2554     Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2555     {
2556         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2557         return branchTest64(cond, dataTempRegister, mask);
2558     }
2559
2560     Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2561     {
2562         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2563         return branchTest64(cond, dataTempRegister, mask);
2564     }
2565
2566     Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2567     {
2568         load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2569         return branchTest64(cond, dataTempRegister, mask);
2570     }
2571
2572     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2573     {
2574         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2575         load8(address, getCachedDataTempRegisterIDAndInvalidate());
2576         return branchTest32(cond, dataTempRegister, mask8);
2577     }
2578
2579     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2580     {
2581         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2582         load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2583         return branchTest32(cond, dataTempRegister, mask8);
2584     }
2585
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Test a byte at base + offset where the offset may exceed the normal
        // addressing-mode range: materialize the offset in the data temp and use
        // a register-offset byte load that overwrites the same temp register.
        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
        move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
        return branchTest32(cond, dataTempRegister, mask8);
    }
2593
2594     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2595     {
2596         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
2597         load8(address, getCachedDataTempRegisterIDAndInvalidate());
2598         return branchTest32(cond, dataTempRegister, mask8);
2599     }
2600
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // ARM64 supports unaligned loads, so the plain 32-bit branch suffices.
        return branch32(cond, left, right);
    }
2605
2606
2607     // Arithmetic control flow operations:
2608     //
2609     // This set of conditional branch operations branch based
2610     // on the result of an arithmetic operation. The operation
2611     // is performed as normal, storing the result.
2612     //
2613     // * jz operations branch if the result is zero.
2614     // * jo operations branch if the (signed) arithmetic
2615     //   operation caused an overflow to occur.
2616     
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        // Flag-setting add (adds), then branch on the resulting flags.
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2622
    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        // Small immediates fit the 12-bit add/sub encodings directly; a small
        // negative immediate becomes a flag-setting subtract of its negation.
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        // Otherwise materialize the immediate and use the register form.
        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }
2637
2638     Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
2639     {
2640         load32(src, getCachedDataTempRegisterIDAndInvalidate());
2641         return branchAdd32(cond, dest, dataTempRegister, dest);
2642     }
2643
2644     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
2645     {
2646         return branchAdd32(cond, dest, src, dest);
2647     }
2648
2649     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2650     {
2651         return branchAdd32(cond, dest, imm, dest);
2652     }
2653
2654     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
2655     {
2656         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2657
2658         if (isUInt12(imm.m_value)) {
2659             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
2660             store32(dataTempRegister, address.m_ptr);
2661         } else if (isUInt12(-imm.m_value)) {
2662             m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
2663             store32(dataTempRegister, address.m_ptr);
2664         } else {
2665             move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
2666             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
2667             store32(dataTempRegister, address.m_ptr);
2668         }
2669
2670         return Jump(makeBranch(cond));
2671     }
2672
    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        // Flag-setting 64-bit add, then branch on the resulting flags.
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2678
2679     Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2680     {
2681         if (isUInt12(imm.m_value)) {
2682             m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
2683             return Jump(makeBranch(cond));
2684         }
2685         if (isUInt12(-imm.m_value)) {
2686             m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
2687             return Jump(makeBranch(cond));
2688         }
2689
2690         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2691         return branchAdd64(cond, op1, dataTempRegister, dest);
2692     }
2693
2694     Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
2695     {
2696         return branchAdd64(cond, dest, src, dest);
2697     }
2698
2699     Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2700     {
2701         return branchAdd64(cond, dest, imm, dest);
2702     }
2703
    Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        // Relational variant: caller guarantees the immediate fits the 12-bit
        // add encoding; branch on the comparison flags the add produces.
        ASSERT(isUInt12(imm.m_value));
        m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
        return Jump(makeBranch(cond));
    }
2710
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        // mul does not set flags on ARM64, so non-overflow conditions are
        // checked by testing the 32-bit result.
        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of scratch1.
        m_assembler.asr<64>(scratch1, dest, 32);
        // Splat bit 31 of the result to bits 31..0 of scratch2.
        m_assembler.asr<32>(scratch2, dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Overflow occurred unless bits 63..31 of the 64-bit result were all equal.
        return branch32(NotEqual, scratch2, scratch1);
    }
2731
2732     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2733     {
2734         return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2735     }
2736
2737     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2738     {
2739         return branchMul32(cond, dest, src, dest);
2740     }
2741
    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        // No multiply-immediate on ARM64: materialize imm in the data temp.
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }
2747
    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
        m_assembler.mul<64>(dest, src1, src2);

        // mul does not set flags; non-overflow conditions test the result.
        if (cond != Overflow)
            return branchTest64(cond, dest);

        // Compute bits 127..64 of the result into scratch1.
        m_assembler.smulh(scratch1, src1, src2);
        // Splat bit 63 of the result to bits 63..0 of scratch2.
        m_assembler.asr<64>(scratch2, dest, 63);
        // Overflow occurred unless bits 127..63 of the full result were all equal.
        return branch64(NotEqual, scratch2, scratch1);
    }
2765
2766     Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2767     {
2768         return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2769     }
2770
2771     Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
2772     {
2773         return branchMul64(cond, dest, src, dest);
2774     }
2775
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        // Flag-setting 32-bit negate (dest = -dest), then branch on the flags.
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
2781
    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        // Flag-setting 64-bit negate (srcDest = -srcDest), then branch.
        m_assembler.neg<64, S>(srcDest, srcDest);
        return Jump(makeBranch(cond));
    }
2787
    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        // One-operand form: dest = 0 - dest (flag-setting negate), then branch.
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
2793
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        // Flag-setting subtract (subs), then branch on the resulting flags.
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2799
    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        // Small immediates fit the 12-bit sub/add encodings; a small negative
        // immediate becomes a flag-setting add of its negation.
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        // Otherwise materialize the immediate and use the register form.
        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }
2814
2815     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2816     {
2817         return branchSub32(cond, dest, src, dest);
2818     }
2819
2820     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2821     {
2822         return branchSub32(cond, dest, imm, dest);
2823     }
2824
    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        // Flag-setting 64-bit subtract, then branch on the resulting flags.
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2830
2831     Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2832     {
2833         if (isUInt12(imm.m_value)) {
2834             m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2835             return Jump(makeBranch(cond));
2836         }
2837         if (isUInt12(-imm.m_value)) {
2838             m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2839             return Jump(makeBranch(cond));
2840         }
2841
2842         move(imm, getCachedDataTempRegisterIDAndInvalidate());
2843         return branchSub64(cond, op1, dataTempRegister, dest);
2844     }
2845
2846     Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
2847     {
2848         return branchSub64(cond, dest, src, dest);
2849     }
2850
2851     Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2852     {
2853         return branchSub64(cond, dest, imm, dest);
2854     }
2855
    Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        // Relational variant: caller guarantees the immediate fits the 12-bit
        // sub encoding; branch on the comparison flags the subtract produces.
        ASSERT(isUInt12(imm.m_value));
        m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
        return Jump(makeBranch(cond));
    }
2862
2863
2864     // Jumps, calls, returns
2865
    ALWAYS_INLINE Call call()
    {
        // Emit a fixed-width (repatchable) load of the callee pointer followed
        // by an indirect call; the linker/repatcher relies on the fixed
        // distance between the pointer load and the call label.
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
2876
    ALWAYS_INLINE Call call(RegisterID target)
    {
        // The callee may clobber the temp registers, so drop any cached
        // knowledge of their contents before the indirect call.
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }
2883
2884     ALWAYS_INLINE Call call(Address address)
2885     {
2886         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2887         return call(dataTempRegister);
2888     }
2889
    ALWAYS_INLINE Jump jump()
    {
        // Unconditional branch. When emitting patchable jumps, use the
        // fixed-size encoding so the jump can later be repointed anywhere.
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
    }
2896
    // Indirect jump through the given register.
    void jump(RegisterID target)
    {
        m_assembler.br(target);
    }
2901
2902     void jump(Address address)
2903     {
2904         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2905         m_assembler.br(dataTempRegister);
2906     }
2907     
2908     void jump(BaseIndex address)
2909     {
2910         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2911         m_assembler.br(dataTempRegister);
2912     }
2913
    void jump(AbsoluteAddress address)
    {
        // Materialize the absolute address, load the target pointer stored
        // there, and jump through it.
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }
2920
2921     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2922     {
2923         oldJump.link(this);
2924         return tailRecursiveCall();
2925     }
2926
    ALWAYS_INLINE Call nearCall()
    {
        // PC-relative bl; target must be within branch range when linked.
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }
2932
    ALWAYS_INLINE Call nearTailCall()
    {
        // Tail call as a PC-relative unconditional branch (no link register).
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Call(label, Call::LinkableNearTail);
    }
2939
    // Return to the address in the link register.
    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }
2944
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        // Uses the same fixed-width pointer-load + branch shape as call() so
        // the same repatching offset applies.
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
2955
2956
2957     // Comparisons operations
2958
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // Compare, then materialize the condition as 0/1 in dest (cset).
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2964
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        // Load the left operand into the data temp, compare, and set dest 0/1.
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2971
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // A comparison against zero may be expressible as a cheaper self-test.
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                test32(*resultCondition, left, left, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            // cmn (compare-negative) covers small negative immediates.
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            move(right, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2991
    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // 64-bit compare, then materialize the condition as 0/1 in dest.
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2997     
    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // A comparison against zero may be expressible as a cheaper self-test.
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                test64(*resultCondition, left, left, dest);
                return;
            }
        }

        // The 32-bit immediate is a 64-bit operand here, so sign-extend it.
        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<64>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3011
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        // Byte compare: truncate the immediate to 8 bits, load the byte into
        // the memory temp, materialize the immediate, and compare as 32-bit.
        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right8, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }
3019
    void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
    {
        // Set flags from src & mask, then materialize the condition as 0/1.
        m_assembler.tst<32>(src, mask);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3025
    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        // Set flags from src & mask, then materialize the condition as 0/1.
        test32(src, mask);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3031
3032     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3033     {
3034         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
3035         test32(cond, memoryTempRegister, mask, dest);
3036     }
3037
3038     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
3039     {
3040         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
3041         load8(address, getCachedMemoryTempRegisterIDAndInvalidate());
3042         test32(cond, memoryTempRegister, mask8, dest);
3043     }
3044
    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        // Set flags from op1 & op2 (64-bit), then materialize cond as 0/1.
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3050
    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            // All-bits mask: test the register against itself.
            m_assembler.tst<64>(src, src);
        else {
            // The 32-bit mask is a 64-bit operand here, so sign-extend it.
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3061
    // Materialize the carry flag (C set) as 0/1 in dest.
    void setCarry(RegisterID dest)
    {
        m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
    }
3066
3067     // Patchable operations
3068
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        // Fixed-width move so the immediate can be repatched at the label.
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }
3075
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        // Fixed-width move so the pointer can be repatched at the label.
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }
3082
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // Emit a repatchable right-hand pointer, then compare and branch.
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }
3089
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // Emit a repatchable right-hand pointer, then compare against memory.
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }
3096
    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        // Emit a repatchable 32-bit right-hand value, then compare and branch.
        dataLabel = DataLabel32(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, left, dataTempRegister);
    }
3103
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        // Force fixed-size jump encoding while the branch is emitted so it
        // can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, left, TrustedImm64(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3111
    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Force fixed-size jump encoding so the branch can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3119
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        // Force fixed-size jump encoding so the branch can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3127
    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
    {
        // Force fixed-size jump encoding so the branch can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3135
    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
    {
        // Force fixed-size jump encoding so the branch can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3143
    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        // Force fixed-size jump encoding so the branch can be repointed later.
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, left, right);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3151
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // Both the compared pointer and the jump itself are repatchable.
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3159
    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        // Both the compared value and the jump itself are repatchable.
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3167
    PatchableJump patchableJump()
    {
        // Unconditional jump emitted with the fixed-size encoding so it can
        // be repointed later.
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3175
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        // Materialize a repatchable pointer (fixed-width move) and store it.
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }
3183
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        // Store a null placeholder; the real pointer is patched in later.
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
3188
    // Rewrite the patchable pointer-move at 'address' to load 'value' into the
    // data temp register (the 'true' flag requests the fixed-width form).
    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }
3193
3194     // Miscellaneous operations:
3195
    // Emit a brk instruction with the given immediate payload.
    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }
3200
    // Emit a single nop instruction.
    void nop()
    {
        m_assembler.nop();
    }
3205     
    // Full-system data memory barrier (dmb sy).
    void memoryFence()
    {
        m_assembler.dmbSY();
    }
3210
3211
3212     // Misc helper functions.
3213
3214     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
    }
3219
3220     static Optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
3221     {
3222         switch (cond) {
3223         case Equal:
3224             return Zero;
3225         case NotEqual:
3226             return NonZero;
3227         case LessThan:
3228             return Signed;
3229         case GreaterThanOrEqual:
3230             return PositiveOrZero;
3231             break;
3232         default:
3233             return Nullopt;
3234         }
3235     }
3236
    // Read back the destination a linked call currently points at.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
    }
3241
    // Overwrite the instruction(s) at instructionStart with a jump to destination.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
3246     
    // Upper bound, in bytes, on the code replaced by replaceWithJump().
    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARM64Assembler::maxJumpReplacementSize();
    }
3251
    RegisterID scratchRegisterForBlinding()
    {
        // We *do not* have a scratch register for blinding.
        RELEASE_ASSERT_NOT_REACHED();
        return getCachedDataTempRegisterIDAndInvalidate();
    }
3258
    // ARM64 does not support jump-replacing the patchable (address-based)
    // branch forms; the corresponding startOf*/revert* entry points below are
    // therefore unreachable on this port.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
3261     
    // The patchable sequence begins exactly at the data label (offset 0), so
    // jump replacement starts there.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }
3266     
    // Unsupported on ARM64 (canJumpReplacePatchableBranchPtrWithPatch() is false).
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }
3272     
    // Unsupported on ARM64 (canJumpReplacePatchableBranch32WithPatch() is false).
    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }
3278     
    // Undoes a replaceWithJump() at 'instructionStart' by re-emitting the
    // original patchable move of 'initialValue'. The RegisterID parameter is
    // ignored: the move always targets dataTempRegister (see
    // reemitInitialMoveWithPatch).
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
    }
3283     
    // Unsupported on ARM64 (canJumpReplacePatchableBranchPtrWithPatch() is false).
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
3288
    // Unsupported on ARM64 (canJumpReplacePatchableBranch32WithPatch() is false).
    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
3293
    // Retargets a previously emitted call: patches the pointer materialization
    // that feeds the call (located REPATCH_OFFSET_CALL_TO_POINTER bytes before
    // the call site) to load 'destination'.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }
3298
    // Overload of the above taking the new target as a FunctionPtr.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }
3303
3304 #if ENABLE(MASM_PROBE)