559d8b7db89f897b0f7feae6aa0606261d8eb490
[WebKit-https.git] / Source / JavaScriptCore / assembler / MacroAssemblerARM64.h
1 /*
2  * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #pragma once
27
28 #if ENABLE(ASSEMBLER)
29
30 #include "ARM64Assembler.h"
31 #include "AbstractMacroAssembler.h"
32 #include <wtf/MathExtras.h>
33 #include <wtf/Optional.h>
34
35 namespace JSC {
36
37 using Assembler = TARGET_ASSEMBLER;
38
39 class MacroAssemblerARM64 : public AbstractMacroAssembler<Assembler> {
40 public:
    static const unsigned numGPRs = 32;
    static const unsigned numFPRs = 32;
    
    // ip0/ip1 are the ARM64 intra-procedure-call scratch registers; this
    // macro assembler reserves them as its own temporaries, so generated
    // code must not keep live values in them across macro operations.
    static constexpr RegisterID dataTempRegister = ARM64Registers::ip0;
    static constexpr RegisterID memoryTempRegister = ARM64Registers::ip1;

    // Hands out dataTempRegister as a general scratch register, invalidating
    // any cached immediate tracked for it. Only legal when the client has not
    // disallowed scratch-register use (m_allowScratchRegister).
    RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return getCachedDataTempRegisterIDAndInvalidate();
    }
52
53 protected:
    // q31 is reserved as the FP/SIMD scratch register.
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    // Shorthand for the flag-setting variants of arithmetic instructions.
    static const Assembler::SetFlags S = Assembler::S;
    // Bit masks for the 16-bit halfwords and upper 32 bits of a 64-bit value.
    static const int64_t maskHalfWord0 = 0xffffl;
    static const int64_t maskHalfWord1 = 0xffff0000l;
    static const int64_t maskUpperWord = 0xffffffff00000000l;

    // All A64 instructions are fixed-width, 4 bytes each.
    static constexpr size_t INSTRUCTION_SIZE = 4;

    // N instructions to load the pointer + 1 call instruction.
    static constexpr ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -((Assembler::MAX_POINTER_BITS / 16 + 1) * INSTRUCTION_SIZE);
64
65 public:
    // Sets up the cached-temp-register trackers for the two scratch GPRs and
    // starts in non-patchable-jump mode.
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }
72
    typedef Assembler::LinkRecord LinkRecord;
    typedef Assembler::JumpType JumpType;
    typedef Assembler::JumpLinkType JumpLinkType;
    typedef Assembler::Condition Condition;

    static const Assembler::Condition DefaultCondition = Assembler::ConditionInvalid;
    static const Assembler::JumpType DefaultJump = Assembler::JumpNoConditionFixedSize;

    // Thin forwards to the underlying assembler's branch-compaction
    // machinery: recorded jumps, whether a jump kind can be shortened, the
    // concrete encoding chosen for a given from/to distance, the size saved,
    // and the final link step.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    static bool canCompact(JumpType jumpType) { return Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    template <Assembler::CopyFunction copy>
    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return Assembler::link<copy>(record, from, fromInstruction, to); }

    // Pointers are 8 bytes, so scaled addressing uses a times-eight scale.
    static const Scale ScalePtr = TimesEight;
90
    // True if |value| is expressible as an unsigned, 8-byte-aligned scaled
    // offset in a load/store instruction (0..0x3ff8 in steps of 8).
    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
96
    // Integer comparison conditions, mapped onto A64 condition codes.
    // Above/Below are unsigned; GreaterThan/LessThan are signed.
    enum RelationalCondition {
        Equal = Assembler::ConditionEQ,
        NotEqual = Assembler::ConditionNE,
        Above = Assembler::ConditionHI,
        AboveOrEqual = Assembler::ConditionHS,
        Below = Assembler::ConditionLO,
        BelowOrEqual = Assembler::ConditionLS,
        GreaterThan = Assembler::ConditionGT,
        GreaterThanOrEqual = Assembler::ConditionGE,
        LessThan = Assembler::ConditionLT,
        LessThanOrEqual = Assembler::ConditionLE
    };

    // Conditions tested on the flags set by a flag-setting ALU operation.
    enum ResultCondition {
        Overflow = Assembler::ConditionVS,
        Signed = Assembler::ConditionMI,
        PositiveOrZero = Assembler::ConditionPL,
        Zero = Assembler::ConditionEQ,
        NonZero = Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = Assembler::ConditionEQ,
        IsNonZero = Assembler::ConditionNE
    };

    // Floating-point comparison conditions. Two entries deliberately carry
    // the wrong raw condition code (see inline notes) so branch/compare
    // emitters can detect them and generate special-case sequences.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = Assembler::ConditionEQ,
        DoubleNotEqual = Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = Assembler::ConditionGE,
        DoubleLessThan = Assembler::ConditionLO,
        DoubleLessThanOrEqual = Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = Assembler::ConditionHS,
        DoubleLessThanOrUnordered = Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;
143
    // FIXME: Get reasonable implementations for these
    // Heuristic used by JIT value blinding: constants of 24 bits or more are
    // considered attacker-controllable enough to be worth blinding.
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
147
148     // Integer operations:
149
    // 32-bit addition. Immediate forms try to encode the constant directly
    // as a 12-bit ADD (or its negation as a 12-bit SUB) before falling back
    // to materializing it in a scratch register.
    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
        m_assembler.add<32>(dest, a, b);
    }

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else if (src != dest) {
            // dest is free, so use it to hold the immediate and avoid
            // burning a scratch register.
            move(imm, dest);
            add32(src, dest);
        } else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }

    // Read-modify-write add on a memory word: load, add, store back.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            // Both scratch registers are needed: data holds the loaded value,
            // memory holds the immediate.
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }
223
    // 64-bit addition. The A64 ADD (extended register) form only accepts sp
    // as the first source operand, hence the operand swapping below.
    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
        if (b == ARM64Registers::sp)
            std::swap(a, b);
        m_assembler.add<64>(dest, a, b);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        // Keep sp in the first-operand slot, as required by the encoding.
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        // 32-bit immediates are sign-extended to pointer width before a
        // 64-bit add.
        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    // Read-modify-write add on a 64-bit memory location.
    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    // Pointer-width add that must not set condition flags.
    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }
340
    // Bitwise AND. Immediate forms first try A64's bitmask-immediate
    // encoding (LogicalImmediate); only unencodable constants are
    // materialized into the data scratch register.
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        and32(dataTempRegister, dest);
    }

    // Loads a 16-bit value (zero-extended by load16) and ANDs it into dest.
    void and16(Address src, RegisterID dest)
    {
        load16(src, getCachedDataTempRegisterIDAndInvalidate());
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.and_<64>(dest, src1, src2);
    }

    void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, src, dataTempRegister);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        // Sign-extend the 32-bit immediate to 64 bits before encoding.
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }
429     
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void countLeadingZeros64(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<64>(dest, src);
    }

    void countTrailingZeros32(RegisterID src, RegisterID dest)
    {
        // Arm does not have a count trailing zeros only a count leading zeros.
        // Bit-reverse first, then CLZ of the reversed value == CTZ of src.
        m_assembler.rbit<32>(dest, src);
        m_assembler.clz<32>(dest, dest);
    }

    void countTrailingZeros64(RegisterID src, RegisterID dest)
    {
        // Arm does not have a count trailing zeros only a count leading zeros.
        m_assembler.rbit<64>(dest, src);
        m_assembler.clz<64>(dest, dest);
    }

    // Swaps the low two bytes, then clears the upper 16 bits of the result.
    void byteSwap16(RegisterID dst)
    {
        m_assembler.rev16<32>(dst, dst);
        zeroExtend16To32(dst, dst);
    }

    void byteSwap32(RegisterID dst)
    {
        m_assembler.rev<32>(dst, dst);
    }

    void byteSwap64(RegisterID dst)
    {
        m_assembler.rev<64>(dst, dst);
    }
469
    // Only used for testing purposes.
    // Emits an instruction guaranteed to fault, to exercise crash handling.
    void illegalInstruction()
    {
        m_assembler.illegalInstruction();
    }
475
    // Logical left shift. Immediate shift amounts are masked to the operand
    // width (0-31 for 32-bit, 0-63 for 64-bit), matching A64 semantics.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }
515
    void mul32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<32>(dest, left, right);
    }
    
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    // No immediate form of MUL exists; materialize the constant first.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul64(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<64>(dest, left, right);
    }

    // dest = summand + (mulLeft * mulRight)
    void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
    }

    // dest = minuend - (mulLeft * mulRight)
    void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
    }

    // dest = -(mulLeft * mulRight), via MSUB with the zero register.
    void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
    {
        m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
    }

    void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
    }

    void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
    {
        m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
    }

    // 32x32 -> 64-bit signed multiply (SMULL).
    void multiplySignExtend32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.smull(dest, left, right);
    }
576
    // Signed and unsigned division (SDIV/UDIV). Note: A64 division by zero
    // yields zero rather than trapping.
    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<32>(dest, dividend, divisor);
    }

    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<64>(dest, dividend, divisor);
    }

    void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.udiv<32>(dest, dividend, divisor);
    }

    void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.udiv<64>(dest, dividend, divisor);
    }
596
    // Two's-complement negation.
    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg32(RegisterID src, RegisterID dest)
    {
        m_assembler.neg<32>(dest, src);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void neg64(RegisterID src, RegisterID dest)
    {
        m_assembler.neg<64>(dest, src);
    }
616
    // Bitwise OR. Like the AND family, immediate forms prefer the bitmask-
    // immediate encoding and fall back to a scratch register.
    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        // The fallback clobbers dataTempRegister, so src must not alias it.
        ASSERT(src != dataTempRegister);
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    // Read-modify-write OR on an absolute memory word.
    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
        if (logicalImm.isValid()) {
            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
            store32(dataTempRegister, address.m_ptr);
        } else {
            // Unencodable immediate: load into memoryTemp, OR into dataTemp.
            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
            store32(dataTempRegister, address.m_ptr);
        }
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        // Sign-extend the 32-bit immediate before the 64-bit encoding attempt.
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }
727
728     void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
729     {
730         m_assembler.ror<32>(dest, src, imm.m_value & 31);
731     }
732
733     void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
734     {
735         rotateRight32(srcDst, imm, srcDst);
736     }
737
738     void rotateRight32(RegisterID src, RegisterID shiftAmmount, RegisterID dest)
739     {
740         m_assembler.ror<32>(dest, src, shiftAmmount);
741     }
742
743     void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest)
744     {
745         m_assembler.ror<64>(dest, src, imm.m_value & 63);
746     }
747
748     void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
749     {
750         rotateRight64(srcDst, imm, srcDst);
751     }
752
753     void rotateRight64(RegisterID src, RegisterID shiftAmmount, RegisterID dest)
754     {
755         m_assembler.ror<64>(dest, src, shiftAmmount);
756     }
757
    // Arithmetic (sign-propagating) right shift. Immediate amounts are
    // masked to the operand width.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }
    
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }
    
    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }
    
    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }
    
    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }
    
    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }
797
798     void sub32(RegisterID src, RegisterID dest)
799     {
800         m_assembler.sub<32>(dest, dest, src);
801     }
802
803     void sub32(RegisterID left, RegisterID right, RegisterID dest)
804     {
805         m_assembler.sub<32>(dest, left, right);
806     }
807
808     void sub32(TrustedImm32 imm, RegisterID dest)
809     {
810         if (isUInt12(imm.m_value)) {
811             m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
812             return;
813         }
814         if (isUInt12(-imm.m_value)) {
815             m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
816             return;
817         }
818
819         move(imm, getCachedDataTempRegisterIDAndInvalidate());
820         m_assembler.sub<32>(dest, dest, dataTempRegister);
821     }
822
823     void sub32(TrustedImm32 imm, Address address)
824     {
825         load32(address, getCachedDataTempRegisterIDAndInvalidate());
826
827         if (isUInt12(imm.m_value))
828             m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
829         else if (isUInt12(-imm.m_value))
830             m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
831         else {
832             move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
833             m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
834         }
835
836         store32(dataTempRegister, address);
837     }
838
839     void sub32(TrustedImm32 imm, AbsoluteAddress address)
840     {
841         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
842
843         if (isUInt12(imm.m_value)) {
844             m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
845             store32(dataTempRegister, address.m_ptr);
846             return;
847         }
848
849         if (isUInt12(-imm.m_value)) {
850             m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
851             store32(dataTempRegister, address.m_ptr);
852             return;
853         }
854
855         move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
856         m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
857         store32(dataTempRegister, address.m_ptr);
858     }
859
860     void sub32(Address src, RegisterID dest)
861     {
862         load32(src, getCachedDataTempRegisterIDAndInvalidate());
863         sub32(dataTempRegister, dest);
864     }
865
866     void sub64(RegisterID src, RegisterID dest)
867     {
868         m_assembler.sub<64>(dest, dest, src);
869     }
870
871     void sub64(RegisterID a, RegisterID b, RegisterID dest)
872     {
873         m_assembler.sub<64>(dest, a, b);
874     }
875     
    // dest -= imm (64-bit); the 32-bit immediate is sign-extended to 64 bits.
    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        // 12-bit unsigned immediates encode directly into sub/add.
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        // Otherwise materialize the sign-extended immediate in the data temp register.
        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }
890     
    // dest -= imm (full 64-bit immediate).
    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        // 12-bit unsigned immediates encode directly into sub/add.
        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        // Otherwise materialize the immediate in the data temp register.
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }
907
    // Logical (unsigned) right shift, 32-bit: dest = src >> shiftAmount.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }
    
    // dest = src >> imm; the shift amount is masked to 0..31.
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    // In-place form: dest >>= shiftAmount.
    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }
    
    // In-place form: dest >>= imm.
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    // Logical (unsigned) right shift, 64-bit: dest = src >> shiftAmount.
    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }
    
    // dest = src >> imm; the shift amount is masked to 0..63.
    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    // In-place form: dest >>= shiftAmount.
    void urshift64(RegisterID shiftAmount, RegisterID dest)
    {
        urshift64(dest, shiftAmount, dest);
    }
    
    // In-place form: dest >>= imm.
    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        urshift64(dest, imm, dest);
    }
947
    // dest ^= src (32-bit).
    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    // dest ^= *src (32-bit); loads the operand through the data temp register.
    void xor32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        xor32(dataTempRegister, dest);
    }

    // dest = op1 ^ op2 (32-bit, three-operand form).
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    // dest ^= imm (32-bit).
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }
968
969     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
970     {
971         if (imm.m_value == -1)
972             m_assembler.mvn<32>(dest, src);
973         else {
974             LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
975
976             if (logicalImm.isValid()) {
977                 m_assembler.eor<32>(dest, src, logicalImm);
978                 return;
979             }
980
981             move(imm, getCachedDataTempRegisterIDAndInvalidate());
982             m_assembler.eor<32>(dest, src, dataTempRegister);
983         }
984     }
985
    // *address ^= src (64-bit, load-modify-store through the data temp register).
    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    // dest ^= src (64-bit).
    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    // dest = op1 ^ op2 (64-bit, three-operand form).
    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    // dest ^= imm (64-bit; the 32-bit immediate is sign-extended).
    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }
1007
1008     void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
1009     {
1010         if (imm.m_value == -1)
1011             m_assembler.mvn<64>(dest, src);
1012         else {
1013             LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
1014
1015             if (logicalImm.isValid()) {
1016                 m_assembler.eor<64>(dest, src, logicalImm);
1017                 return;
1018             }
1019
1020             move(imm, getCachedDataTempRegisterIDAndInvalidate());
1021             m_assembler.eor<64>(dest, src, dataTempRegister);
1022         }
1023     }
1024
    // srcDest ^= imm (64-bit immediate, in-place form).
    void xor64(TrustedImm64 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest, srcDest);
    }
1029
1030     void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
1031     {
1032         if (imm.m_value == -1)
1033             m_assembler.mvn<64>(dest, src);
1034         else {
1035             LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
1036
1037             if (logicalImm.isValid()) {
1038                 m_assembler.eor<64>(dest, src, logicalImm);
1039                 return;
1040             }
1041
1042             signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1043             m_assembler.eor<64>(dest, src, dataTempRegister);
1044         }
1045     }
1046     
    // dest ^= *src (64-bit); loads the operand through the data temp register.
    void xor64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        xor64(dataTempRegister, dest);
    }
1052
    // Bitwise NOT, 32-bit, in place.
    void not32(RegisterID srcDest)
    {
        m_assembler.mvn<32>(srcDest, srcDest);
    }

    // dest = ~src (32-bit).
    void not32(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<32>(dest, src);
    }

    // dest = ~src (64-bit).
    void not64(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<64>(dest, src);
    }

    // Bitwise NOT, 64-bit, in place.
    void not64(RegisterID srcDst)
    {
        m_assembler.mvn<64>(srcDst, srcDst);
    }
1072
1073     // Memory access operations:
1074
    // 64-bit load from base + offset; falls back to computing the offset in the
    // memory temp register when it cannot be encoded in the load itself.
    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // 64-bit load from base + (index << scale) + offset. A single scaled-register
    // load is possible only with no offset and scale 0 or 3 (8-byte elements).
    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // 64-bit load from an absolute address.
    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }
1100
    // 64-bit load with post-index writeback: dest = *src, then src += simm.
    void load64(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldr<64>(dest, src, simm);
    }

    // 64-bit load whose 32-bit offset can be patched later; the fixed-width move
    // sequence keeps the patch site a constant size.
    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, Assembler::SXTW, 0);
        return label;
    }
    
    // 64-bit load with a compact patchable offset (must already be encodable).
    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }
1121
    // Load a pair of 64-bit values from [src] into dest1/dest2 (ldp).
    void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64(src, TrustedImm32(0), dest1, dest2);
    }

    // Load a pair of 64-bit values from [src + offset].
    void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
    }

    // Non-temporal variant (ldnp): hints that the data should bypass the cache.
    void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
    {
        loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
    }

    // Non-temporal pair load from [src + offset].
    void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
    }
1141
    // Deliberately crash, leaving the reason code in a register for post-mortem
    // inspection.
    void abortWithReason(AbortReason reason)
    {
        // It is safe to use dataTempRegister directly since this is a crashing JIT Assert.
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    // Crash with a reason code plus an extra word of diagnostic data.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        // It is safe to use memoryTempRegister directly since this is a crashing JIT Assert.
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }
1155
    // Pointer-sized load that can later be converted by the patching machinery;
    // the offset must fit the scaled 64-bit ldr immediate form (asserted below).
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }
1163
    // 32-bit load from base + offset; falls back to the memory temp register when
    // the offset cannot be encoded directly.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // 32-bit load from base + (index << scale) + offset; single scaled-register
    // load only with no offset and scale 0 or 2 (4-byte elements).
    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // 32-bit load from an absolute address.
    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    // 32-bit load whose offset can be patched later (fixed-width move sequence).
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, Assembler::SXTW, 0);
        return label;
    }
    
    // 32-bit load with a compact patchable offset (must already be encodable).
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    // ARM64 loads tolerate unaligned addresses, so this is just a plain load32.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
1210
    // 16-bit zero-extending load from base + offset.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }
    
    // 16-bit zero-extending load from base + (index << scale) + offset; single
    // scaled-register load only with no offset and scale 0 or 1 (2-byte elements).
    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    // 16-bit load from absoluteOffset + (base << 1); the cached constant must be
    // invalidated if dest clobbers the memory temp register.
    void load16(ExtendedAddress address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), cachedMemoryTempRegister());
        m_assembler.ldrh(dest, memoryTempRegister, address.base, Assembler::UXTX, 1);
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }

    // ARM64 loads tolerate unaligned addresses, so these are plain load16s.
    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
1249
    // 16-bit load, sign-extended to 32 bits, from base + offset.
    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    // 16-bit sign-extending load from base + (index << scale) + offset.
    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<32>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    // dest = zero-extend16(src).
    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxth<32>(dest, src);
    }

    // dest = sign-extend16(src).
    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxth<32>(dest, src);
    }
1280
    // 8-bit zero-extending load from base + offset.
    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    // 8-bit zero-extending load from base + index + offset; the register-offset
    // form requires no offset and no scale (1-byte elements).
    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }
    
    // 8-bit load from an absolute address; invalidate the cached constant if
    // dest clobbers the memory temp register.
    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }

    // 8-bit load with post-index writeback: dest = *src, then src += simm.
    void load8(RegisterID src, PostIndex simm, RegisterID dest)
    {
        m_assembler.ldrb(dest, src, simm);
    }
1314
    // 8-bit load, sign-extended to 32 bits, from base + offset.
    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    // 8-bit sign-extending load from base + index + offset.
    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<32>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    // 8-bit sign-extending load from an absolute address.
    void load8SignedExtendTo32(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
        m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            cachedMemoryTempRegister().invalidate();
    }

    // dest = zero-extend8(src).
    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtb<32>(dest, src);
    }

    // dest = sign-extend8(src).
    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtb<32>(dest, src);
    }
1353
    // 64-bit store to base + offset; falls back to the memory temp register when
    // the offset cannot be encoded directly.
    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // 64-bit store to base + (index << scale) + offset; single scaled-register
    // store only with no offset and scale 0 or 3 (8-byte elements).
    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }
    
    // 64-bit store to an absolute address.
    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }
1379
    // Store a 32-bit immediate widened to 64 bits.
    void store64(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm.m_value), address);
    }

    // Store a 64-bit immediate; zero stores use the zero register directly,
    // avoiding any immediate materialization.
    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store64(dataTempRegister, address);
    }

    // Store a 64-bit immediate to a base+index address.
    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store64(dataTempRegister, address);
    }
1406
    // 64-bit store with post-index writeback: *dest = src, then dest += simm.
    void store64(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.str<64>(src, dest, simm);
    }
    
    // Store 64-bit zero using the zero register (no temp needed).
    void storeZero64(ImplicitAddress address)
    {
        store64(ARM64Registers::zr, address);
    }
    
    void storeZero64(BaseIndex address)
    {
        store64(ARM64Registers::zr, address);
    }
    
    // 64-bit store whose 32-bit offset can be patched later; the fixed-width move
    // sequence keeps the patch site a constant size.
    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, Assembler::SXTW, 0);
        return label;
    }
1429
    // Store a pair of 64-bit values to [dest] (stp).
    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64(src1, src2, dest, TrustedImm32(0));
    }

    // Store a pair of 64-bit values to [dest + offset].
    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
    }

    // Non-temporal variant (stnp): hints that the data should bypass the cache.
    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
    }

    // Non-temporal pair store to [dest + offset].
    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
    }
1449
    // 32-bit store to base + offset.
    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // 32-bit store to base + (index << scale) + offset; single scaled-register
    // store only with no offset and scale 0 or 2 (4-byte elements).
    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // 32-bit store to an absolute address.
    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }
1475
    // Store a 32-bit immediate; zero stores use the zero register directly,
    // avoiding any immediate materialization.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }

    // Store a 32-bit immediate to a base+index address.
    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }

    // Store a 32-bit immediate to an absolute address.
    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, dataMemoryTempRegister());
        store32(dataTempRegister, address);
    }
1508
    // Store 32-bit zero using the zero register (no temp needed).
    void storeZero32(ImplicitAddress address)
    {
        store32(ARM64Registers::zr, address);
    }

    void storeZero32(BaseIndex address)
    {
        store32(ARM64Registers::zr, address);
    }

    // 32-bit store whose 32-bit offset can be patched later (fixed-width move).
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, Assembler::SXTW, 0);
        return label;
    }
1526
    // 16-bit store to base + offset.
    void store16(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<16>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    // 16-bit store to base + (index << scale) + offset; single scaled-register
    // store only with no offset and scale 0 or 1 (2-byte elements).
    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    // Store 16-bit zero using the zero register.
    void storeZero16(ImplicitAddress address)
    {
        store16(ARM64Registers::zr, address);
    }

    void storeZero16(BaseIndex address)
    {
        store16(ARM64Registers::zr, address);
    }
1557
    // 8-bit store to base + index + offset; the register-offset form requires no
    // offset and no scale (1-byte elements).
    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    // 8-bit store to an absolute address.
    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    // 8-bit store to base + offset.
    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    // Store an 8-bit immediate (truncated to int8_t); zero goes through the zero
    // register without materializing an immediate.
    void store8(TrustedImm32 imm, void* address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        if (!imm8.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    // 8-bit immediate store to base + offset (same truncation/zero handling).
    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        if (!imm8.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    // 8-bit store with post-index writeback: *dest = src, then dest += simm.
    void store8(RegisterID src, RegisterID dest, PostIndex simm)
    {
        m_assembler.strb(src, dest, simm);
    }
1613
    // dest = base + (index << scale) + offset; the offset add is skipped when it
    // is zero.
    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        m_assembler.add<64>(dest, address.base, address.index, Assembler::LSL, address.scale);
        if (address.offset)
            add64(TrustedImm32(address.offset), dest);
    }
1620
1621     // Floating-point operations:
1622
    // Capability queries for this backend: ARM64 has full hardware FP support,
    // including truncation, sqrt, abs, and directed rounding. Population count is
    // not exposed here.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
    static bool supportsFloatingPointRounding() { return true; }
    static bool supportsCountPopulation() { return false; }

    // Selects whether a truncation branch is taken on failure or on success.
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1631
    // dest = |src| (double precision).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    // dest = |src| (single precision).
    void absFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<32>(dest, src);
    }

    // dest += src (double precision).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    // dest = op1 + op2 (double precision, three-operand form).
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    // dest += *src; loads the operand through the FP temp register.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest += *address (absolute address operand).
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest = op1 + op2 (single precision).
    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<32>(dest, op1, op2);
    }
1668
    // Round toward +infinity (frintp), double precision.
    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    // Round toward +infinity, single precision.
    void ceilFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<32>(dest, src);
    }

    // Round toward -infinity (frintm), double precision.
    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Round toward -infinity, single precision.
    void floorFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<32>(dest, src);
    }

    // Round to nearest, ties to even (frintn), double precision.
    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintn<64>(dest, src);
    }

    // Round to nearest, ties to even, single precision.
    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintn<32>(dest, src);
    }

    // Round toward zero (frintz), double precision.
    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<64>(dest, src);
    }

    // Round toward zero, single precision.
    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintz<32>(dest, src);
    }
1708
1709
1710     // Convert 'src' to an integer, and places the resulting 'dest'.
1711     // If the result is not representable as a 32 bit value, branch.
1712     // May also branch for some values that are representable in 32 bits
1713     // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        // Convert to int32 with round-to-nearest.
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // Test for negative zero.
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            // An integer result of 0 could have come from -0.0: inspect the sign
            // bit (bit 63) of the original double's raw bits and fail if set.
            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
            m_assembler.fmov<64>(scratch, src);
            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
            valueIsNonZero.link(this);
        }
    }
1731
    // Compare two doubles and branch on the given condition.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }

    // Compare two floats and branch on the given condition.
    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<32>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }

    // dest = (left cond right) as 0/1, double precision.
    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        floatingPointCompare(cond, left, right, dest, [this] (FPRegisterID arg1, FPRegisterID arg2) {
            m_assembler.fcmp<64>(arg1, arg2);
        });
    }

    // dest = (left cond right) as 0/1, single precision.
    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        floatingPointCompare(cond, left, right, dest, [this] (FPRegisterID arg1, FPRegisterID arg2) {
            m_assembler.fcmp<32>(arg1, arg2);
        });
    }
1757
    // Branch if reg is non-zero and not NaN. The FPRegisterID scratch
    // parameter is unused on ARM64 (fcmp against #0.0 needs no scratch).
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        // VS set means the compare was unordered (reg is NaN) — fall through
        // instead of taking the "non-zero" branch.
        Jump unordered = makeBranch(Assembler::ConditionVS);
        Jump result = makeBranch(Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg is zero or NaN. The FPRegisterID scratch parameter is
    // unused on ARM64.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(Assembler::ConditionVS);
        Jump notEqual = makeBranch(Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
1778
    // Truncate src toward zero into dest (32-bit), branching on success or
    // failure per branchType. Failure means the truncated value does not fit
    // in a signed 32-bit integer.
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check the low 32-bits sign extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
1788
    // Narrow a double to a single-precision float (may round).
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    // Widen a single-precision float to a double (always exact).
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }
    
    // Convert an immediate int32 to a double, staging it through the data
    // temp register.
    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }
    
    // Signed int32 -> double (always exact).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    // Load an int32 from memory and convert it to a double.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Load an int32 from an absolute address and convert it to a double.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Signed int32 -> float (may round).
    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<32, 32>(dest, src);
    }
    
    // Signed int64 -> double (may round).
    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    // Signed int64 -> float (may round).
    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<32, 64>(dest, src);
    }

    // Unsigned int64 -> double (may round).
    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.ucvtf<64, 64>(dest, src);
    }

    // Unsigned int64 -> float (may round).
    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.ucvtf<32, 64>(dest, src);
    }
1846
    // dest = dest / src (64-bit).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    // dest = op1 / op2 (64-bit).
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    // dest = op1 / op2 (32-bit).
    void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<32>(dest, op1, op2);
    }
1861
    // Load a double from base + offset. Falls back to materializing the
    // offset in the memory temp register when it cannot be encoded directly.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Load a double from base + (index << scale) + offset.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        // The register-offset ldr form is used only when there is no immediate
        // offset and the scale is 0 or 3 (the 64-bit access size).
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        // Otherwise compute offset + (index << scale) in the memory temp register.
        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
    
    // Load a double from an absolute address (address goes through the cached
    // memory temp register).
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, cachedMemoryTempRegister());
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    // Load a float from base + offset (same fallback strategy as loadDouble).
    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // Load a float from base + (index << scale) + offset; the direct form
    // requires scale 0 or 2 (the 32-bit access size) and no offset.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // Load a float from an absolute address.
    void loadFloat(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, cachedMemoryTempRegister());
        m_assembler.ldr<32>(dest, memoryTempRegister, ARM64Registers::zr);
    }
1915
    // Copy one FP register to another (64-bit).
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Set reg to +0.0 by moving the zero register into it.
    void moveZeroToDouble(FPRegisterID reg)
    {
        m_assembler.fmov<64>(reg, ARM64Registers::zr);
    }

    // Transfer the raw 64 bits of an FP register to a GPR (no conversion).
    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Transfer the raw 32 bits of an FP register to a GPR (no conversion).
    void moveFloatTo32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }

    // Transfer the raw 64 bits of a GPR to an FP register (no conversion).
    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Transfer the raw 32 bits of a GPR to an FP register (no conversion).
    void move32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }
1945
    // dest = cond(left, right) ? src : dest, comparing as 64-bit doubles.
    // The select itself is always 64-bit wide.
    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }

    // dest = cond(left, right) ? thenCase : elseCase, comparing as doubles.
    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }

    // dest = cond(left, right) ? src : dest, comparing as 32-bit floats.
    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }

    // dest = cond(left, right) ? thenCase : elseCase, comparing as floats.
    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
1969
    // Conditional select after an fcmp has set the flags.
    // DoubleNotEqual and DoubleEqualOrUnordered need special handling because
    // no single ARM64 condition code expresses them:
    //  - NE is true for unordered too, so DoubleNotEqual must skip the select
    //    when the compare was unordered (VS).
    //  - DoubleEqualOrUnordered is synthesized from two selects (VS then EQ).
    template<int datasize>
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            Jump unordered = makeBranch(Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, src is copied to dest and the
            // next csel has all arguments equal to src.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            m_assembler.csel<datasize>(dest, src, dest, Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, Assembler::ConditionEQ);
            return;
        }
        m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
    }

    // Three-operand variant: dest = cond ? thenCase : elseCase.
    // NOTE(review): in the DoubleEqualOrUnordered path the first csel writes
    // elseCase, so elseCase is clobbered when the compare was unordered —
    // callers appear to rely only on dest; confirm before reusing elseCase.
    template<int datasize>
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            Jump unordered = makeBranch(Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, thenCase, elseCase, Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, thenCase is copied to elseCase and the
            // next csel has all arguments equal to thenCase.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            m_assembler.csel<datasize>(elseCase, thenCase, elseCase, Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, thenCase, elseCase, Assembler::ConditionEQ);
            return;
        }
        m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // Same as above but selecting between FP registers with fcsel.
    // The same elseCase-clobber note applies to the unordered path.
    template<int datasize>
    void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            Jump unordered = makeBranch(Assembler::ConditionVS);
            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, thenCase is copied to elseCase and the
            // next csel has all arguments equal to thenCase.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, Assembler::ConditionVS);
            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, Assembler::ConditionEQ);
            return;
        }
        m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2032
    // dest = cond(left, right) ? thenCase : elseCase; compare as doubles,
    // select between FP registers (64-bit fcsel).
    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }

    // Same, but the comparison is done on 32-bit floats.
    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
    }
2044
    // dest = dest * src (64-bit).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    // dest = op1 * op2 (64-bit).
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    // dest = dest * [src], loading the operand through the FP temp register.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // dest = op1 * op2 (32-bit).
    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<32>(dest, op1, op2);
    }

    // Bitwise AND of two FP registers (vector AND on the 64-bit payload).
    void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vand<64>(dest, op1, op2);
    }

    // Bitwise AND for floats; delegates to the 64-bit form, which covers the
    // 32-bit payload as well.
    void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        andDouble(op1, op2, dest);
    }

    // Bitwise OR of two FP registers.
    void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vorr<64>(dest, op1, op2);
    }

    // Bitwise OR for floats; delegates to the 64-bit form.
    void orFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        orDouble(op1, op2, dest);
    }

    // dest = -src (64-bit; flips the sign bit, so -0.0 and NaN payloads are
    // handled per fneg semantics).
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    // dest = -src (32-bit).
    void negateFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<32>(dest, src);
    }

    // dest = sqrt(src) (64-bit).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    // dest = sqrt(src) (32-bit).
    void sqrtFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<32>(dest, src);
    }
2105
    // Store a double to base + offset; falls back to materializing the offset
    // in the memory temp register when it cannot be encoded directly.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Store a double to an absolute address.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        moveToCachedReg(address, cachedMemoryTempRegister());
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    // Store a double to base + (index << scale) + offset. The direct
    // register-offset form requires no offset and scale 0 or 3.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Store a float to base + offset (same fallback strategy as storeDouble).
    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }
    
    // Store a float to base + (index << scale) + offset; direct form requires
    // no offset and scale 0 or 2.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }
2153
    // dest = dest - src (64-bit).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    // dest = op1 - op2 (64-bit).
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    // dest = dest - [src], loading the operand through the FP temp register.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // dest = op1 - op2 (32-bit).
    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<32>(dest, op1, op2);
    }
2174
    // Result is undefined if the value is outside of the integer range.
    // All of these use fcvtzs/fcvtzu: convert to integer rounding toward zero.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    // double -> uint32, rounding toward zero.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }

    // double -> int64, rounding toward zero.
    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<64, 64>(dest, src);
    }

    // double -> uint64. The two FPRegisterID scratch parameters are unused on
    // ARM64 (other ports need them); forwards to the two-argument form.
    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
    {
        truncateDoubleToUint64(src, dest);
    }

    // double -> uint64, rounding toward zero.
    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<64, 64>(dest, src);
    }

    // float -> int32, rounding toward zero.
    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 32>(dest, src);
    }

    // float -> uint32, rounding toward zero.
    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 32>(dest, src);
    }

    // float -> int64, rounding toward zero.
    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<64, 32>(dest, src);
    }

    // float -> uint64; unused FP scratch parameters as above.
    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
    {
        truncateFloatToUint64(src, dest);
    }

    // float -> uint64, rounding toward zero.
    void truncateFloatToUint64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<64, 32>(dest, src);
    }
2225
2226     // Stack manipulation operations:
2227     //
2228     // The ABI is assumed to provide a stack abstraction to memory,
2229     // containing machine word sized units of data. Push and pop
2230     // operations add and remove a single register sized unit of data
2231     // to or from the stack. These operations are not supported on
2232     // ARM64. Peek and poke operations read or write values on the
2233     // stack, without moving the current stack position. Additionally,
2234     // there are popToRestore and pushToSave operations, which are
2235     // designed just for quick-and-dirty saving and restoring of
2236     // temporary values. These operations don't claim to have any
2237     // ABI compatibility.
2238     
    // Single-register push/pop is not supported on ARM64 (see the stack
    // manipulation comment above); these overloads crash if ever called.
    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }
2258
    // Pop two 64-bit registers; sp advances by 16 (post-indexed ldp).
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
    }

    // Push two 64-bit registers; sp is pre-decremented by 16 (stp).
    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
    }

    // Quick-and-dirty restore: pop one register but consume a full 16-byte
    // slot to keep sp 16-byte aligned. No ABI compatibility is claimed.
    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    // Quick-and-dirty save: push one register into a 16-byte slot.
    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }
2278     
    // Push an immediate without perturbing any register's final value: a
    // temp register is borrowed and then restored from the stack.
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        // We can use any non-hardware reserved register here since we restore its value.
        // We pick dataTempRegister arbitrarily. We don't need to invalidate it here since
        // we restore its original value.
        RegisterID reg = dataTempRegister;

        // After this, [sp] and [sp + 8] both hold reg's original value.
        pushPair(reg, reg);
        move(imm, reg);
        // Overwrite the slot at [sp] with the immediate...
        store64(reg, stackPointerRegister);
        // ...then restore reg from the untouched copy at [sp + 8].
        load64(Address(stackPointerRegister, 8), reg);
    }

    // Load a 32-bit value from memory and push it (via the data temp register).
    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    // Push an immediate (via the data temp register; clobbers it).
    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }
    
    // Pop a double from its 16-byte save slot.
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }
    
    // Push a double into a 16-byte save slot.
    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    // Every pushToSave/popToRestore slot is 16 bytes (keeps sp aligned).
    static ptrdiff_t pushToSaveByteOffset() { return 16; }
2317
2318     // Register move operations:
2319
    // Register-to-register move; elided when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    // Materialize a 32-bit immediate in dest.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    // Materialize a pointer-sized immediate in dest.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    // Materialize a 64-bit immediate in dest.
    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }
2340
    // Swap two GPRs via the data temp register (which is clobbered).
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Swap two FP registers via the FP temp register (which is clobbered).
    void swap(FPRegisterID reg1, FPRegisterID reg2)
    {
        moveDouble(reg1, fpTempRegister);
        moveDouble(reg2, reg1);
        moveDouble(fpTempRegister, reg2);
    }
2354
    // Materialize a 32-bit immediate sign-extended to pointer width.
    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
    }
    
    // Sign-extend the low 32 bits of src into the 64-bit dest (sxtw).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    // Zero-extend the low 32 bits of src into the 64-bit dest (uxtw).
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }
2369
    // dest = (left <cond> right) ? src : dest; 32-bit compare, 64-bit select.
    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }

    // dest = (left <cond> right) ? thenCase : elseCase; 32-bit compare.
    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // Immediate-compare variant. Comparisons against zero are strength-reduced
    // to a register test when the condition permits; otherwise the immediate
    // is encoded with cmp (UInt12), cmn (negated UInt12), or materialized in
    // the data temp register.
    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // dest = (left <cond> right) ? src : dest; 64-bit compare.
    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }

    // dest = (left <cond> right) ? thenCase : elseCase; 64-bit compare.
    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // 64-bit immediate-compare variant; same encoding strategy as the 32-bit
    // form above.
    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2433
    // dest = cond(testReg & mask) ? src : dest; 32-bit test, 64-bit select.
    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.tst<32>(testReg, mask);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }

    // dest = cond(left & right) ? thenCase : elseCase; 32-bit test.
    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.tst<32>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // Immediate-mask variant; the mask encoding is handled by test32.
    void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        test32(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // dest = cond(testReg & mask) ? src : dest; 64-bit test.
    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
    {
        m_assembler.tst<64>(testReg, mask);
        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
    }

    // dest = cond(left & right) ? thenCase : elseCase; 64-bit test.
    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
    {
        m_assembler.tst<64>(left, right);
        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2463
    // FP select driven by an integer compare:
    // dest = (left <cond> right) ? thenCase : elseCase (32-bit cmp, fcsel).
    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // Immediate-compare variant; zero compares are commuted to a register
    // test when possible, otherwise the immediate is encoded via cmp/cmn
    // (UInt12) or materialized in the data temp register.
    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // Same as above but with a 64-bit integer compare.
    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // 64-bit immediate-compare variant; same encoding strategy as the 32-bit
    // form above.
    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
                return;
            }
        }

        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // FP select driven by a 32-bit register test.
    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.tst<32>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // FP select driven by a 32-bit immediate-mask test.
    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        test32(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }

    // FP select driven by a 64-bit register test.
    void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
    {
        m_assembler.tst<64>(left, right);
        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
    }
2533
    // Bit field operations:

    // Insert the low 'width' bits of source into dest starting at bit
    // position destBitOffset (BFI). Zero is the lowest order bit; the
    // inserted field must fit entirely within 64 bits.
    void bitFieldInsert64(RegisterID source, unsigned destBitOffset, unsigned width, RegisterID dest)
    {
        ASSERT(width <= 64 - destBitOffset && destBitOffset < 64);
        m_assembler.bfi<64>(dest, source, destBitOffset, width);
    }
2542
2543     // Forwards / external control flow operations:
2544     //
2545     // This set of jump and conditional branch operations return a Jump
2546     // object which may linked at a later point, allow forwards jump,
2547     // or jumps that will require external linkage (after the code has been
2548     // relocated).
2549     //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
2555     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
2556     // treated as a signed 32bit value, is less than or equal to 5.
2557     //
2558     // jz and jnz test whether the first operand is equal to zero, and take
2559     // an optional second operand of a mask under which to perform the test.
2560
    // Sets the flags with a 32-bit CMP and returns an unlinked conditional jump.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        // Comparison against zero can often be strength-reduced to testing the
        // register against itself (e.g. Equal -> Zero), avoiding the immediate.
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest32(*resultCondition, left, left);
        }

        // Prefer a single CMP/CMN with a 12-bit immediate; otherwise materialize
        // the immediate into the data temp register.
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand forms load the operand into the memory temp register and delegate.
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    // Absolute-address forms load through the data temp register (the memory temp
    // may be needed by the delegated comparison when right is a large immediate).
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
2620
    // 64-bit compare-and-branch. SP needs special handling: the CMP encoding only
    // accepts SP as the first operand.
    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        if (right == ARM64Registers::sp) {
            if (cond == Equal && left != ARM64Registers::sp) {
                // CMP can only use SP for the left argument, since we are testing for equality, the order
                // does not matter here.
                std::swap(left, right);
            } else {
                // Otherwise copy SP into the data temp register so it can appear on the right.
                move(right, getCachedDataTempRegisterIDAndInvalidate());
                right = dataTempRegister;
            }
        }
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        // Comparison against zero reduces to a test of the register with itself.
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }

        // Prefer a 12-bit immediate CMP/CMN; fall back to the data temp register.
        if (isUInt12(right.m_value))
            m_assembler.cmp<64>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<64>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (!immediate) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
                return branchTest64(*resultCondition, left, left);
        }

        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, dataMemoryTempRegister());
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand forms load into a temp register and delegate to the register forms.
    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    // Pointers are 64-bit on ARM64, so branchPtr is just branch64.
    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }
2708
    // 8-bit compare-and-branch. The helpers widen the byte consistently with the
    // condition's signedness (mask8OnCondition adjusts the immediate, load8OnCondition
    // picks the matching zero-/sign-extending load), then a 32-bit compare is used.
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }
    
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right8);
    }
2729     
    // Branch on reg & mask. Testing a register against itself for (non-)zero
    // uses the dedicated compare-and-branch form (CBZ/CBNZ) instead of TST + B.cond.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        if (reg == mask && (cond == Zero || cond == NonZero))
            return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }
2737
2738     void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2739     {
2740         if (mask.m_value == -1)
2741             m_assembler.tst<32>(reg, reg);
2742         else {
2743             LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2744
2745             if (logicalImm.isValid())
2746                 m_assembler.tst<32>(reg, logicalImm);
2747             else {
2748                 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2749                 m_assembler.tst<32>(reg, dataTempRegister);
2750             }
2751         }
2752     }
2753
    // Branch on the currently-set condition flags without emitting a compare.
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            // Full-register (non-)zero tests map onto CBZ/CBNZ.
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            // Single-bit (non-)zero tests map onto TBZ/TBNZ.
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            // Otherwise TST with a logical immediate if encodable, or via the data temp.
            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
            if (logicalImm.isValid()) {
                m_assembler.tst<32>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand forms load into the memory temp register and delegate.
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }
2791
    // 64-bit branch on reg & mask; self-test for (non-)zero uses CBZ/CBNZ.
    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        if (reg == mask && (cond == Zero || cond == NonZero))
            return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            // The 32-bit mask is sign-extended to 64 bits, both implicitly by create64
            // and explicitly by the signExtend32ToPtr fallback below.
            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

            if (logicalImm.isValid()) {
                m_assembler.tst<64>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

            if (logicalImm.isValid()) {
                m_assembler.tst<64>(reg, logicalImm);
                return Jump(makeBranch(cond));
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Memory-operand forms load into the data temp register (the delegated test may
    // itself need the data temp only after the loaded value has been consumed by TST).
    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
2867
    // 8-bit branch tests: the byte and the mask are widened consistently with the
    // condition's signedness by the MacroAssemblerHelpers, then tested as 32-bit.
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask8);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask8);
    }

    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        // Materialize the (potentially large) offset, then load byte at base + offset.
        move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());

        // Unsigned conditions use a zero-extending load, signed conditions a sign-extending one.
        if (MacroAssemblerHelpers::isUnsigned<MacroAssemblerARM64>(cond))
            m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
        else
            m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister);

        return branchTest32(cond, dataTempRegister, mask8);
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask8);
    }

    // ARM64 loads handle unaligned halfwords, so this is just branch32.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
2906
2907
2908     // Arithmetic control flow operations:
2909     //
2910     // This set of conditional branch operations branch based
2911     // on the result of an arithmetic operation. The operation
2912     // is performed as normal, storing the result.
2913     //
2914     // * jz operations branch if the result is zero.
2915     // * jo operations branch if the (signed) arithmetic
2916     //   operation caused an overflow to occur.
2917     
    // Flag-setting add (ADDS) followed by a conditional branch; cond may test
    // zero/sign/overflow of the result.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        // Prefer single ADDS/SUBS with a 12-bit immediate; otherwise go through
        // the data temp register.
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, dest, dataTempRegister, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }

    // In-memory add: load, ADDS/SUBS, store back, then branch. The store and any
    // address materialization do not emit flag-setting instructions, so the branch
    // still observes the flags from the S-form arithmetic.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
            store32(dataTempRegister, address.m_ptr);
        }

        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
        return Jump(makeBranch(cond));
    }
2990
    // 64-bit flag-setting add (ADDS) followed by a conditional branch.
    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        // 12-bit immediates encode directly as ADDS/SUBS; otherwise use the data temp.
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd64(cond, op1, dataTempRegister, dest);
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }

    // Relational-condition variant: the immediate must already fit the 12-bit encoding.
    Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT(isUInt12(imm.m_value));
        m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
        return Jump(makeBranch(cond));
    }
3028
    // Multiply-and-branch. MUL does not set flags on ARM64, so the Overflow case is
    // detected by widening the multiply and checking that the high part is just the
    // sign-extension of the low part.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of scratch1.
        m_assembler.asr<64>(scratch1, dest, 32);
        // Splat bit 31 of the result to bits 31..0 of scratch2.
        m_assembler.asr<32>(scratch2, dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, scratch2, scratch1);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }

    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
        m_assembler.mul<64>(dest, src1, src2);

        if (cond != Overflow)
            return branchTest64(cond, dest);

        // Compute bits 127..64 of the result into scratch1.
        m_assembler.smulh(scratch1, src1, src2);
        // Splat bit 63 of the result to bits 63..0 of scratch2.
        m_assembler.asr<64>(scratch2, dest, 63);
        // Check that bits 63..127 of the original result were all equal.
        return branch64(NotEqual, scratch2, scratch1);
    }

    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul64(cond, dest, src, dest);
    }
3093
    // Flag-setting negate (NEGS) followed by a conditional branch.
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        m_assembler.neg<64, S>(srcDest, srcDest);
        return Jump(makeBranch(cond));
    }

    // branchSub32 with a single operand computes 0 - dest, i.e. a negate.
    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }

    // Flag-setting subtract (SUBS) followed by a conditional branch.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        // 12-bit immediates encode directly as SUBS/ADDS; otherwise use the data temp.
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub64(cond, op1, dataTempRegister, dest);
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }

    // Relational-condition variant: the immediate must already fit the 12-bit encoding.
    Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT(isUInt12(imm.m_value));
        m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
        return Jump(makeBranch(cond));
    }
3180
3181
3182     // Jumps, calls, returns
3183
    // Patchable call: a fixed-width move materializes a (to-be-linked) callee pointer
    // into the data temp register, followed by BLR. Temp register caches are
    // invalidated because the callee may clobber them.
    ALWAYS_INLINE Call call(PtrTag)
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(nullptr), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        // Repatching relies on a fixed distance between the call label and the pointer move.
        ASSERT_UNUSED(pointerLabel, Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }

    // Indirect call through an arbitrary register.
    ALWAYS_INLINE Call call(RegisterID target, PtrTag)
    {
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }

    // Indirect call through a function pointer loaded from memory.
    ALWAYS_INLINE Call call(Address address, PtrTag tag)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return call(dataTempRegister, tag);
    }

    // Register-carried tag variants: the tag register is unused on this port.
    ALWAYS_INLINE Call call(RegisterID callTag) { return UNUSED_PARAM(callTag), call(NoPtrTag); }
    ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); }
    ALWAYS_INLINE Call call(Address address, RegisterID callTag) { return UNUSED_PARAM(callTag), call(address, NoPtrTag); }

    // Unconditional jump; emitted fixed-size when jumps must remain patchable.
    ALWAYS_INLINE Jump jump()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? Assembler::JumpNoConditionFixedSize : Assembler::JumpNoCondition);
    }

    // Indirect (register/memory target) jumps via BR.
    void farJump(RegisterID target, PtrTag)
    {
        m_assembler.br(target);
    }

    void farJump(Address address, PtrTag)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }
    
    void farJump(BaseIndex address, PtrTag)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }

    void farJump(AbsoluteAddress address, PtrTag)
    {
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }

    // Register-carried tag variants: the tag register is unused on this port.
    ALWAYS_INLINE void farJump(RegisterID target, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(target, NoPtrTag); }
    ALWAYS_INLINE void farJump(Address address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); }
    ALWAYS_INLINE void farJump(BaseIndex address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); }
    ALWAYS_INLINE void farJump(AbsoluteAddress address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); }

    // PC-relative call (BL), linked later.
    ALWAYS_INLINE Call nearCall()
    {
        invalidateAllTempRegisters();
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }

    // Tail call emitted as a plain branch (B), linked later.
    ALWAYS_INLINE Call nearTailCall()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Call(label, Call::LinkableNearTail);
    }

    // Near call whose single BL instruction can be repatched while other threads execute.
    ALWAYS_INLINE Call threadSafePatchableNearCall()
    {
        invalidateAllTempRegisters();
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }

    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }
3273
    // Comparison operations

    // Compare and materialize the condition as 0/1 in dest (CMP + CSET).
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // Comparison against zero reduces to a test of the register with itself.
        if (!right.m_value) {
            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
                test32(*resultCondition, left, left, dest);
                return;
            }
        }

        // Prefer 12-bit immediate CMP/CMN; fall back to the data temp register.
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            move(right, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
3314     
3315     void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
3316     {
3317         if (!right.m_value) {
3318             if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
3319                 test64(*resultCondition, left, left, dest);
3320                 return;
3321             }
3322         }
3323
3324         signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
3325         m_assembler.cmp<64>(left, dataTempRegister);
3326         m_assembler.cset<32>(dest, ARM64Condition(cond));
3327     }
3328
    // 8-bit compare-and-set. The load and the immediate mask are widened in a
    // condition-dependent way (signed conditions sign-extend, unsigned zero-extend)
    // by the MacroAssemblerHelpers, then a plain 32-bit compare finishes the job.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right8, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }

    // Test-and-set: dest = ((src & mask) <cond>) ? 1 : 0, via TST + CSET.
    void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
    {
        m_assembler.tst<32>(src, mask);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Immediate-mask form; the flag-setting part is delegated to test32(src, mask).
    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        test32(src, mask);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Memory form: the operand is loaded into the memory temp register first.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        test32(cond, memoryTempRegister, mask, dest);
    }

    // 8-bit test-and-set; widening of the load and mask is condition-dependent,
    // as in compare8 above.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate());
        test32(cond, memoryTempRegister, mask8, dest);
    }

    // 64-bit test-and-set; the boolean result only needs a 32-bit CSET.
    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // Immediate-mask form. mask == -1 means "all bits", so src is tested against
    // itself; otherwise the sign-extended mask is materialized into the data temp.
    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<64>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }

    // dest = 1 if the carry flag (condition CS) is set, else 0.
    void setCarry(RegisterID dest)
    {
        m_assembler.cset<32>(dest, Assembler::ConditionCS);
    }
3383
3384     // Patchable operations
3385
    // Emits a fixed-width immediate move so the value can be repatched later;
    // the returned label marks the start of the patchable instruction sequence.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    // Pointer-sized variant of the above.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }

    // Branch on a comparison whose right-hand constant can be repatched later.
    // The constant is loaded with a fixed-width move into the data temp register;
    // dataLabel marks where to patch the constant.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    // Same, with the left operand in memory.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }

    // 32-bit variant of the patchable-constant branch.
    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        dataLabel = DataLabel32(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, left, dataTempRegister);
    }
3420
    // Patchable branches: each of these temporarily sets m_makeJumpPatchable so
    // the underlying branch is emitted in a form whose target can be repatched
    // after linking, then wraps the result in a PatchableJump.

    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, left, TrustedImm64(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch8(RelationalCondition cond, Address left, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch8(cond, left, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_makeJumpPatchable = true;
        Jump result = branch64(cond, left, right);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
3500
    // Stores a pointer constant that can be repatched later: the constant is
    // emitted with a fixed-width move (marked by the returned label), then stored.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }

    // Convenience overload with a null initial value.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(nullptr), address);
    }

    // Rewrites an already-emitted fixed-width move (as produced by
    // moveWithPatch/storePtrWithPatch targeting dataTempRegister) to load `value`.
    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }
3518
3519     // Miscellaneous operations:
3520
    // Emits a BRK with the given immediate payload.
    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }

    // True if the instruction at `address` is a BRK.
    static bool isBreakpoint(void* address) { return Assembler::isBrk(address); }

    void nop()
    {
        m_assembler.nop();
    }
    
    // We take memoryFence to mean acqrel. This has acqrel semantics on ARM64.
    void memoryFence()
    {
        m_assembler.dmbISH();
    }

    // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST").
    void storeFence()
    {
        m_assembler.dmbISHST();
    }

    // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this
    // using dependencies or half fences, but there are cases where this is as good as it gets. The only
    // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like
    // the memoryFence().
    void loadFence()
    {
        m_assembler.dmbISH();
    }
3553     
    // Acquire loads (LDAR) and release stores (STLR). extractSimpleAddress
    // reduces the ImplicitAddress to a plain base register, since LDAR/STLR
    // take no offset.

    void loadAcq8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldar<8>(dest, extractSimpleAddress(address));
    }
    
    // Zero-extended 8-bit acquire load: LDAR then mask to the low byte.
    void loadAcq8(ImplicitAddress address, RegisterID dest)
    {
        loadAcq8SignedExtendTo32(address, dest);
        and32(TrustedImm32(0xff), dest);
    }
    
    void storeRel8(RegisterID src, ImplicitAddress address)
    {
        m_assembler.stlr<8>(src, extractSimpleAddress(address));
    }
    
    void loadAcq16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldar<16>(dest, extractSimpleAddress(address));
    }
    
    // Zero-extended 16-bit acquire load: LDAR then mask to the low halfword.
    void loadAcq16(ImplicitAddress address, RegisterID dest)
    {
        loadAcq16SignedExtendTo32(address, dest);
        and32(TrustedImm32(0xffff), dest);
    }
    
    void storeRel16(RegisterID src, ImplicitAddress address)
    {
        m_assembler.stlr<16>(src, extractSimpleAddress(address));
    }
    
    void loadAcq32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldar<32>(dest, extractSimpleAddress(address));
    }
    
    void loadAcq64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldar<64>(dest, extractSimpleAddress(address));
    }
    
    void storeRel32(RegisterID dest, ImplicitAddress address)
    {
        m_assembler.stlr<32>(dest, extractSimpleAddress(address));
    }
    
    void storeRel64(RegisterID dest, ImplicitAddress address)
    {
        m_assembler.stlr<64>(dest, extractSimpleAddress(address));
    }
3605     
    // Load-link / store-conditional primitives, per width:
    //   loadLink*     -> LDXR   (exclusive load)
    //   loadLinkAcq*  -> LDAXR  (exclusive load with acquire)
    //   storeCond*    -> STXR   (exclusive store; `result` gets 0 on success, nonzero on failure)
    //   storeCondRel* -> STLXR  (exclusive store with release)

    void loadLink8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldxr<8>(dest, extractSimpleAddress(address));
    }
    
    void loadLinkAcq8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldaxr<8>(dest, extractSimpleAddress(address));
    }
    
    void storeCond8(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stxr<8>(result, src, extractSimpleAddress(address));
    }
    
    void storeCondRel8(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stlxr<8>(result, src, extractSimpleAddress(address));
    }
    
    void loadLink16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldxr<16>(dest, extractSimpleAddress(address));
    }
    
    void loadLinkAcq16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldaxr<16>(dest, extractSimpleAddress(address));
    }
    
    void storeCond16(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stxr<16>(result, src, extractSimpleAddress(address));
    }
    
    void storeCondRel16(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stlxr<16>(result, src, extractSimpleAddress(address));
    }
    
    void loadLink32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldxr<32>(dest, extractSimpleAddress(address));
    }
    
    void loadLinkAcq32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldaxr<32>(dest, extractSimpleAddress(address));
    }
    
    void storeCond32(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stxr<32>(result, src, extractSimpleAddress(address));
    }
    
    void storeCondRel32(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stlxr<32>(result, src, extractSimpleAddress(address));
    }
    
    void loadLink64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldxr<64>(dest, extractSimpleAddress(address));
    }
    
    void loadLinkAcq64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.ldaxr<64>(dest, extractSimpleAddress(address));
    }
    
    void storeCond64(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stxr<64>(result, src, extractSimpleAddress(address));
    }
    
    void storeCondRel64(RegisterID src, ImplicitAddress address, RegisterID result)
    {
        m_assembler.stlxr<64>(result, src, extractSimpleAddress(address));
    }
3685     
    // Width-named wrappers around the width-templated CAS implementations
    // (atomicStrongCAS<bits>, atomicRelaxedStrongCAS<bits>,
    // branchAtomicWeakCAS<bits>, branchAtomicRelaxedWeakCAS<bits>), which are
    // defined elsewhere in this class. AddressType is any address form the
    // underlying implementation accepts.

    template<typename AddressType>
    void atomicStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicStrongCAS<8>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicStrongCAS<16>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicStrongCAS<32>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicStrongCAS<64>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicRelaxedStrongCAS8(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicRelaxedStrongCAS<8>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicRelaxedStrongCAS16(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicRelaxedStrongCAS<16>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicRelaxedStrongCAS32(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicRelaxedStrongCAS<32>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    void atomicRelaxedStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, AddressType address, RegisterID result)
    {
        atomicRelaxedStrongCAS<64>(cond, expectedAndResult, newValue, address, result);
    }
    
    template<typename AddressType>
    JumpList branchAtomicWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicWeakCAS<8>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicWeakCAS<16>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicWeakCAS<32>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicWeakCAS<64>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicRelaxedWeakCAS8(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicRelaxedWeakCAS<8>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicRelaxedWeakCAS16(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicRelaxedWeakCAS<16>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicRelaxedWeakCAS32(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicRelaxedWeakCAS<32>(cond, expectedAndClobbered, newValue, address);
    }
    
    template<typename AddressType>
    JumpList branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, AddressType address)
    {
        return branchAtomicRelaxedWeakCAS<64>(cond, expectedAndClobbered, newValue, address);
    }
3781     
    // dest = src ^ src, which is always 0 but keeps a register dependency on src
    // (unlike moving the constant 0), so later uses of dest are data-dependent
    // on src.
    void depend32(RegisterID src, RegisterID dest)
    {
        m_assembler.eor<32>(dest, src, src);
    }
    
    // 64-bit variant of depend32.
    void depend64(RegisterID src, RegisterID dest)
    {
        m_assembler.eor<64>(dest, src, src);
    }
3791
    // Whether the CPU supports FJCVTZS (ARMv8.3 JS-semantics double->int32).
    // When not known at build time, the answer is probed once via
    // collectCPUFeatures() and cached in s_jscvtCheckState.
    ALWAYS_INLINE static bool supportsDoubleToInt32ConversionUsingJavaScriptSemantics()
    {
#if HAVE(FJCVTZS_INSTRUCTION)
        return true;
#else
        if (s_jscvtCheckState == CPUIDCheckState::NotChecked)
            collectCPUFeatures();

        return s_jscvtCheckState == CPUIDCheckState::Set;
#endif
    }

    // Only valid when the predicate above returns true.
    void convertDoubleToInt32UsingJavaScriptSemantics(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fjcvtzs(dest, src); // This zero extends.
    }
3808     
3809 #if ENABLE(FAST_TLS_JIT)
3810     // This will use scratch registers if the offset is not legal.
3811
    // Reads the thread-local base from TPIDRRO_EL0, clears the low three bits
    // (TODO confirm: presumably the register's low bits carry non-address data),
    // then loads 32 bits from base + offset. Clobbers dst as scratch.
    void loadFromTLS32(uint32_t offset, RegisterID dst)
    {
        m_assembler.mrs_TPIDRRO_EL0(dst);
        and64(TrustedImm32(~7), dst);
        load32(Address(dst, offset), dst);
    }
    
    // 64-bit variant of loadFromTLS32.
    void loadFromTLS64(uint32_t offset, RegisterID dst)
    {
        m_assembler.mrs_TPIDRRO_EL0(dst);
        and64(TrustedImm32(~7), dst);
        load64(Address(dst, offset), dst);
    }

    // The store path below needs the macro scratch (data temp) register.
    static bool loadFromTLSPtrNeedsMacroScratchRegister()
    {
        return true;
    }
3830
3831     void storeToTLS32(RegisterID src, uint32_t offset)
3832     {
3833         RegisterID tmp = getCachedDataTempRegisterIDAndInvalidate();
3834         ASSERT(src != tmp);
3835         m_assembler.mrs_TPIDRRO_EL0(tmp);
3836         and64(TrustedImm32(~7), tmp);
3837         store32(src, Address(tmp, offset));
3838     }
3839     
3840     void storeToTLS64(RegisterID src, uint32_t offset)
3841     {
3842         RegisterID tmp = getCachedDataTempRegisterIDAndInvalidate();
3843         ASSERT(src != tmp);
3844         m_assembler.mrs_TPIDRRO_EL0(tmp);
3845         and64(TrustedImm32(~7), tmp);