/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#if ENABLE(ASSEMBLER)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

namespace JSC {

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // Four instructions - three to load the function pointer, plus one blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
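    // A sketch of the expected call site layout (an assumption inferred from
    // the -16 byte offset: four 4-byte instructions, the pointer materialized
    // by a three-instruction move sequence):
    //
    //     movz x16, #lo16            ; function pointer, bits 0-15
    //     movk x16, #mid16, lsl #16  ; bits 16-31
    //     movk x16, #hi16, lsl #32   ; bits 32-47
    //     blr  x16                   ; the repatch offset points back from here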

public:
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
    int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
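    // Equivalently: a non-negative multiple of 8 no larger than 0x3ff8. The
    // 12-bit scaled unsigned immediate of a 32-bit LDR reaches 0x3ffc; the
    // extra 8-byte alignment requirement keeps the same offset usable for
    // 64-bit accesses as well.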

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! Check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! Check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
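    // The two "not the right flag" conditions above cannot be tested with a
    // single conditional branch; branchDouble() below emits an extra branch on
    // the overflow (unordered) flag to compensate.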

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // FIXME: Get reasonable implementations for these
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
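    // Constant blinding is a JIT-spraying mitigation; the 0x00ffffff threshold
    // above is a conservative placeholder rather than a tuned heuristic (see
    // the FIXME).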

    // Integer operations:

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
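    // The function above shows the immediate pattern used throughout this
    // file: try the value as a 12-bit unsigned ADD immediate, try its negation
    // as a SUB immediate, and otherwise materialize the constant in the data
    // temp register.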

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
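    // LogicalImmediate is only valid for values encodable as an ARM64 "bitmask
    // immediate" (a rotated, repeating run of contiguous set bits); anything
    // else falls back to materializing the constant, as above.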

    void and32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }
    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }


    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
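    // The fixed-width move above occupies a constant number of instructions
    // regardless of the offset value, so the 32-bit offset recorded at the
    // DataLabel32 can later be repatched in place.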

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }


    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Convert 'src' to an integer, and place the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
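    // Worked example of why the zero check is needed: for src = -0.0, fcvtns
    // produces the integer 0, scvtf converts that back to +0.0, and fcmp
    // treats +0.0 and -0.0 as equal, so only the branchTest32 above catches
    // the lost sign.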

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, and copy the low 32 bits to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend to the full value: the extended
        // cmp compares the register against its own low 32 bits, sign-extended.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dest; the low 32 bits hold the result.
        m_assembler.fcvtzs<64, 64>(dest, src);
        // Check that the low 32 bits zero-extend to the full value.
        m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeDouble(FPRegisterID src, const void* address)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.
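    //
    // A minimal usage sketch (assuming some caller-managed scratch register
    // 'reg'):
    //
    //     pushToSave(reg);   // str reg, [sp, #-16]!
    //     ...                // code that clobbers reg
    //     popToRestore(reg); // ldr reg, [sp], #16
    //
    // Each save consumes a full 16 bytes because the ARM64 ABI requires the
    // stack pointer to remain 16-byte aligned.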

    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }


    // Register move operations:

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }


1510     // Forwards / external control flow operations:
1511     //
1512     // This set of jump and conditional branch operations returns a Jump
1513     // object which may be linked at a later point, allowing forward jumps,
1514     // or jumps that will require external linkage (after the code has been
1515     // relocated).
1516     //
1517     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1518     // respectively; for unsigned comparisons the names b, a, be, and ae are
1519     // used (representing the names 'below' and 'above').
1520     //
1521     // Operands to the comparison are provided in the expected order, e.g.
1522     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1523     // treated as a signed 32-bit value, is less than or equal to 5.
1524     //
1525     // jz and jnz test whether the first operand is equal to zero, and take
1526     // an optional second operand of a mask under which to perform the test.
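         //
         // For example (an illustrative sketch; `jit`, `reg1`, and `slow` are
         // assumed names):
         //
         //     Jump slow = jit.branch32(GreaterThan, reg1, TrustedImm32(5));
         //     ...                 // fast path, reached when reg1 <= 5
         //     slow.link(&jit);    // resolve the forward branch here
         //
         // The returned Jump is a forward reference, resolved later either by
         // link()ing it or through a LinkBuffer once the code is relocated.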
1527
1528     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1529     {
1530         m_assembler.cmp<32>(left, right);
1531         return Jump(makeBranch(cond));
1532     }
1533
1534     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1535     {
1536         if (isUInt12(right.m_value))
1537             m_assembler.cmp<32>(left, UInt12(right.m_value));
1538         else if (isUInt12(-right.m_value))
1539             m_assembler.cmn<32>(left, UInt12(-right.m_value));
1540         else {
1541             moveToCachedReg(right, m_dataMemoryTempRegister);
1542             m_assembler.cmp<32>(left, dataTempRegister);
1543         }
1544         return Jump(makeBranch(cond));
1545     }
1546
1547     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1548     {
1549         load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
1550         return branch32(cond, left, memoryTempRegister);
1551     }
1552
1553     Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1554     {
1555         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1556         return branch32(cond, memoryTempRegister, right);
1557     }
1558
1559     Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1560     {
1561         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1562         return branch32(cond, memoryTempRegister, right);
1563     }
1564
1565     Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1566     {
1567         load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
1568         return branch32(cond, memoryTempRegister, right);
1569     }
1570
1571     Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1572     {
1573         load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
1574         return branch32(cond, memoryTempRegister, right);
1575     }
1576
1577     Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1578     {
1579         load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
1580         return branch32(cond, memoryTempRegister, right);
1581     }
1582
1583     Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
1584     {
1585         m_assembler.cmp<64>(left, right);
1586         return Jump(makeBranch(cond));
1587     }
1588
1589     Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
1590     {
1591         intptr_t immediate = right.m_value;
1592         if (isUInt12(immediate))
1593             m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
1594         else if (isUInt12(-immediate))
1595             m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
1596         else {
1597             moveToCachedReg(right, m_dataMemoryTempRegister);
1598             m_assembler.cmp<64>(left, dataTempRegister);
1599         }
1600         return Jump(makeBranch(cond));
1601     }
1602
1603     Jump branch64(RelationalCondition cond, RegisterID left, Address right)
1604     {
1605         load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
1606         return branch64(cond, left, memoryTempRegister);
1607     }
1608
1609     Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1610     {
1611         load64(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
1612         return branch64(cond, memoryTempRegister, right);
1613     }
1614
1615     Jump branch64(RelationalCondition cond, Address left, RegisterID right)
1616     {
1617         load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
1618         return branch64(cond, memoryTempRegister, right);
1619     }
1620
1621     Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
1622     {
1623         load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
1624         return branch64(cond, memoryTempRegister, right);
1625     }
1626
1627     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1628     {
1629         ASSERT(!(0xffffff00 & right.m_value));
1630         load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
1631         return branch32(cond, memoryTempRegister, right);
1632     }
1633
1634     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1635     {
1636         ASSERT(!(0xffffff00 & right.m_value));
1637         load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
1638         return branch32(cond, memoryTempRegister, right);
1639     }
1640     
1641     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1642     {
1643         ASSERT(!(0xffffff00 & right.m_value));
1644         load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
1645         return branch32(cond, memoryTempRegister, right);
1646     }
1647     
1648     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1649     {
1650         m_assembler.tst<32>(reg, mask);
1651         return Jump(makeBranch(cond));
1652     }
1653
1654     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1655     {
1656         if (mask.m_value == -1) {
1657             if ((cond == Zero) || (cond == NonZero))
1658                 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
1659             m_assembler.tst<32>(reg, reg);
1660         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
1661             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
1662         else {
1663             if ((cond == Zero) || (cond == NonZero)) {
1664                 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
1665
1666                 if (logicalImm.isValid()) {
1667                     m_assembler.tst<32>(reg, logicalImm);
1668                     return Jump(makeBranch(cond));
1669                 }
1670             }
1671
1672             move(mask, getCachedDataTempRegisterIDAndInvalidate());
1673             m_assembler.tst<32>(reg, dataTempRegister);
1674         }
1675         return Jump(makeBranch(cond));
1676     }
1677
1678     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1679     {
1680         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1681         return branchTest32(cond, memoryTempRegister, mask);
1682     }
1683
1684     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1685     {
1686         load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
1687         return branchTest32(cond, memoryTempRegister, mask);
1688     }
1689
1690     Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
1691     {
1692         m_assembler.tst<64>(reg, mask);
1693         return Jump(makeBranch(cond));
1694     }
1695
1696     Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1697     {
1698         if (mask.m_value == -1) {
1699             if ((cond == Zero) || (cond == NonZero))
1700                 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
1701             m_assembler.tst<64>(reg, reg);
1702         } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
1703             return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
1704         else {
1705             if ((cond == Zero) || (cond == NonZero)) {
1706                 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
1707
1708                 if (logicalImm.isValid()) {
1709                     m_assembler.tst<64>(reg, logicalImm);
1710                     return Jump(makeBranch(cond));
1711                 }
1712             }
1713
1714             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
1715             m_assembler.tst<64>(reg, dataTempRegister);
1716         }
1717         return Jump(makeBranch(cond));
1718     }
1719
1720     Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
1721     {
1722         load64(address, getCachedDataTempRegisterIDAndInvalidate());
1723         return branchTest64(cond, dataTempRegister, mask);
1724     }
1725
1726     Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1727     {
1728         load64(address, getCachedDataTempRegisterIDAndInvalidate());
1729         return branchTest64(cond, dataTempRegister, mask);
1730     }
1731
1732     Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1733     {
1734         load64(address, getCachedDataTempRegisterIDAndInvalidate());
1735         return branchTest64(cond, dataTempRegister, mask);
1736     }
1737
1738     Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1739     {
1740         load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1741         return branchTest64(cond, dataTempRegister, mask);
1742     }
1743
1744     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1745     {
1746         load8(address, getCachedDataTempRegisterIDAndInvalidate());
1747         return branchTest32(cond, dataTempRegister, mask);
1748     }
1749
1750     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1751     {
1752         load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1753         return branchTest32(cond, dataTempRegister, mask);
1754     }
1755
1756     Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
1757     {
1758         move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
1759         m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
1760         return branchTest32(cond, dataTempRegister, mask);
1761     }
1762
1763     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1764     {
1765         return branch32(cond, left, right);
1766     }
1767
1768
1769     // Arithmetic control flow operations:
1770     //
1771     // This set of conditional branch operations branch based
1772     // on the result of an arithmetic operation. The operation
1773     // is performed as normal, storing the result.
1774     //
1775     // * jz operations branch if the result is zero.
1776     // * jo operations branch if the (signed) arithmetic
1777     //   operation caused an overflow to occur.
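         //
         // For example, a checked 32-bit increment (illustrative; `jit` and
         // `counter` are assumed names):
         //
         //     Jump overflowed = jit.branchAdd32(Overflow, TrustedImm32(1), counter);
         //
         // counter then holds counter + 1, and `overflowed` is taken iff the
         // signed addition wrapped around.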
1778     
1779     Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1780     {
1781         m_assembler.add<32, S>(dest, op1, op2);
1782         return Jump(makeBranch(cond));
1783     }
1784
1785     Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1786     {
1787         if (isUInt12(imm.m_value)) {
1788             m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
1789             return Jump(makeBranch(cond));
1790         }
1791         if (isUInt12(-imm.m_value)) {
1792             m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
1793             return Jump(makeBranch(cond));
1794         }
1795
1796         signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1797         return branchAdd32(cond, op1, dataTempRegister, dest);
1798     }
1799
1800     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1801     {
1802         return branchAdd32(cond, dest, src, dest);
1803     }
1804
1805     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1806     {
1807         return branchAdd32(cond, dest, imm, dest);
1808     }
1809
1810     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
1811     {
1812         load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
1813
1814         if (isUInt12(imm.m_value)) {
1815             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
1816             store32(dataTempRegister, address.m_ptr);
1817         } else if (isUInt12(-imm.m_value)) {
1818             m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
1819             store32(dataTempRegister, address.m_ptr);
1820         } else {
1821             move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
1822             m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
1823             store32(dataTempRegister, address.m_ptr);
1824         }
1825
1826         return Jump(makeBranch(cond));
1827     }
1828
1829     Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1830     {
1831         m_assembler.add<64, S>(dest, op1, op2);
1832         return Jump(makeBranch(cond));
1833     }
1834
1835     Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1836     {
1837         if (isUInt12(imm.m_value)) {
1838             m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
1839             return Jump(makeBranch(cond));
1840         }
1841         if (isUInt12(-imm.m_value)) {
1842             m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
1843             return Jump(makeBranch(cond));
1844         }
1845
1846         move(imm, getCachedDataTempRegisterIDAndInvalidate());
1847         return branchAdd64(cond, op1, dataTempRegister, dest);
1848     }
1849
1850     Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
1851     {
1852         return branchAdd64(cond, dest, src, dest);
1853     }
1854
1855     Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1856     {
1857         return branchAdd64(cond, dest, imm, dest);
1858     }
1859
1860     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1861     {
1862         ASSERT(cond != Signed);
1863
1864         if (cond != Overflow) {
1865             m_assembler.mul<32>(dest, src1, src2);
1866             return branchTest32(cond, dest);
1867         }
1868
1869         // This is a signed multiply of two 32-bit values, producing a 64-bit result.
1870         m_assembler.smull(dest, src1, src2);
1871         // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
1872         m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
1873         // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
1874         m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
1875         // After a mul32 the top 32 bits of the register should be clear.
1876         zeroExtend32ToPtr(dest, dest);
1877         // Check that bits 31..63 of the original result were all equal.
1878         return branch32(NotEqual, memoryTempRegister, dataTempRegister);
1879     }
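         // Why the Overflow check above works: smull computes the exact 64-bit
         // product, which fits a signed 32-bit result iff bits 63..31 are all
         // copies of the sign bit. E.g. 0x10000 * 0x10000 = 0x100000000: bits
         // 63..32 give 0x00000001 while splatting bit 31 gives 0x00000000, so
         // the NotEqual branch (overflow) is taken.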
1880
1881     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1882     {
1883         return branchMul32(cond, dest, src, dest);
1884     }
1885
1886     Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
1887     {
1888         move(imm, getCachedDataTempRegisterIDAndInvalidate());
1889         return branchMul32(cond, dataTempRegister, src, dest);
1890     }
1891
1892     Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1893     {
1894         ASSERT(cond != Signed);
1895
1896         // This is a signed multiply of two 64-bit values, producing a 64-bit result.
1897         m_assembler.mul<64>(dest, src1, src2);
1898
1899         if (cond != Overflow)
1900             return branchTest64(cond, dest);
1901
1902         // Compute bits 127..64 of the result into dataTempRegister.
1903         m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
1904         // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
1905         m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
1906         // Check that bits 63..127 of the original result were all equal.
1907         return branch64(NotEqual, memoryTempRegister, dataTempRegister);
1908     }
1909
1910     Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
1911     {
1912         return branchMul64(cond, dest, src, dest);
1913     }
1914
1915     Jump branchNeg32(ResultCondition cond, RegisterID dest)
1916     {
1917         m_assembler.neg<32, S>(dest, dest);
1918         return Jump(makeBranch(cond));
1919     }
1920
1921     Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
1922     {
1923         m_assembler.neg<64, S>(srcDest, srcDest);
1924         return Jump(makeBranch(cond));
1925     }
1926
1927     Jump branchSub32(ResultCondition cond, RegisterID dest)
1928     {
1929         m_assembler.neg<32, S>(dest, dest);
1930         return Jump(makeBranch(cond));
1931     }
1932
1933     Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1934     {
1935         m_assembler.sub<32, S>(dest, op1, op2);
1936         return Jump(makeBranch(cond));
1937     }
1938
1939     Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1940     {
1941         if (isUInt12(imm.m_value)) {
1942             m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
1943             return Jump(makeBranch(cond));
1944         }
1945         if (isUInt12(-imm.m_value)) {
1946             m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
1947             return Jump(makeBranch(cond));
1948         }
1949
1950         signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
1951         return branchSub32(cond, op1, dataTempRegister, dest);
1952     }
1953
1954     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1955     {
1956         return branchSub32(cond, dest, src, dest);
1957     }
1958
1959     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1960     {
1961         return branchSub32(cond, dest, imm, dest);
1962     }
1963
1964     Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1965     {
1966         m_assembler.sub<64, S>(dest, op1, op2);
1967         return Jump(makeBranch(cond));
1968     }
1969
1970     Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1971     {
1972         if (isUInt12(imm.m_value)) {
1973             m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
1974             return Jump(makeBranch(cond));
1975         }
1976         if (isUInt12(-imm.m_value)) {
1977             m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
1978             return Jump(makeBranch(cond));
1979         }
1980
1981         move(imm, getCachedDataTempRegisterIDAndInvalidate());
1982         return branchSub64(cond, op1, dataTempRegister, dest);
1983     }
1984
1985     Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
1986     {
1987         return branchSub64(cond, dest, src, dest);
1988     }
1989
1990     Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1991     {
1992         return branchSub64(cond, dest, imm, dest);
1993     }
1994
1995
1996     // Jumps, calls, returns
1997
1998     ALWAYS_INLINE Call call()
1999     {
2000         AssemblerLabel pointerLabel = m_assembler.label();
2001         moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2002         invalidateAllTempRegisters();
2003         m_assembler.blr(dataTempRegister);
2004         AssemblerLabel callLabel = m_assembler.label();
2005         ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2006         return Call(callLabel, Call::Linkable);
2007     }
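         // Because the callee pointer is materialized by a fixed-width move a
         // known distance before the blr, linkCall/repatchCall (below) can
         // later rewrite just those mov instructions to retarget the call
         // without touching the blr itself.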
2008
2009     ALWAYS_INLINE Call call(RegisterID target)
2010     {
2011         invalidateAllTempRegisters();
2012         m_assembler.blr(target);
2013         return Call(m_assembler.label(), Call::None);
2014     }
2015
2016     ALWAYS_INLINE Call call(Address address)
2017     {
2018         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2019         return call(dataTempRegister);
2020     }
2021
2022     ALWAYS_INLINE Jump jump()
2023     {
2024         AssemblerLabel label = m_assembler.label();
2025         m_assembler.b();
2026         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
2027     }
2028
2029     void jump(RegisterID target)
2030     {
2031         m_assembler.br(target);
2032     }
2033
2034     void jump(Address address)
2035     {
2036         load64(address, getCachedDataTempRegisterIDAndInvalidate());
2037         m_assembler.br(dataTempRegister);
2038     }
2039
2040     void jump(AbsoluteAddress address)
2041     {
2042         move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
2043         load64(Address(dataTempRegister), dataTempRegister);
2044         m_assembler.br(dataTempRegister);
2045     }
2046
2047     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2048     {
2049         oldJump.link(this);
2050         return tailRecursiveCall();
2051     }
2052
2053     ALWAYS_INLINE Call nearCall()
2054     {
2055         m_assembler.bl();
2056         return Call(m_assembler.label(), Call::LinkableNear);
2057     }
2058
2059     ALWAYS_INLINE void ret()
2060     {
2061         m_assembler.ret();
2062     }
2063
2064     ALWAYS_INLINE Call tailRecursiveCall()
2065     {
2066         // Like a normal call, but don't link.
2067         AssemblerLabel pointerLabel = m_assembler.label();
2068         moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2069         m_assembler.br(dataTempRegister);
2070         AssemblerLabel callLabel = m_assembler.label();
2071         ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2072         return Call(callLabel, Call::Linkable);
2073     }
2074
2075
2076     // Comparison operations
2077
2078     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2079     {
2080         m_assembler.cmp<32>(left, right);
2081         m_assembler.cset<32>(dest, ARM64Condition(cond));
2082     }
2083
2084     void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
2085     {
2086         load32(left, getCachedDataTempRegisterIDAndInvalidate());
2087         m_assembler.cmp<32>(dataTempRegister, right);
2088         m_assembler.cset<32>(dest, ARM64Condition(cond));
2089     }
2090
2091     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2092     {
2093         move(right, getCachedDataTempRegisterIDAndInvalidate());
2094         m_assembler.cmp<32>(left, dataTempRegister);
2095         m_assembler.cset<32>(dest, ARM64Condition(cond));
2096     }
2097
2098     void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2099     {
2100         m_assembler.cmp<64>(left, right);
2101         m_assembler.cset<32>(dest, ARM64Condition(cond));
2102     }
2103     
2104     void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2105     {
2106         signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
2107         m_assembler.cmp<64>(left, dataTempRegister);
2108         m_assembler.cset<32>(dest, ARM64Condition(cond));
2109     }
2110
2111     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2112     {
2113         load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2114         move(right, getCachedDataTempRegisterIDAndInvalidate());
2115         compare32(cond, memoryTempRegister, dataTempRegister, dest);
2116     }
2117     
2118     void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2119     {
2120         if (mask.m_value == -1)
2121             m_assembler.tst<32>(src, src);
2122         else {
2123             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2124             m_assembler.tst<32>(src, dataTempRegister);
2125         }
2126         m_assembler.cset<32>(dest, ARM64Condition(cond));
2127     }
2128
2129     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2130     {
2131         load32(address, getCachedDataTempRegisterIDAndInvalidate());
2132         test32(cond, dataTempRegister, mask, dest);
2133     }
2134
2135     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2136     {
2137         load8(address, getCachedDataTempRegisterIDAndInvalidate());
2138         test32(cond, dataTempRegister, mask, dest);
2139     }
2140
2141     void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2142     {
2143         m_assembler.tst<64>(op1, op2);
2144         m_assembler.cset<32>(dest, ARM64Condition(cond));
2145     }
2146
2147     void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2148     {
2149         if (mask.m_value == -1)
2150             m_assembler.tst<64>(src, src);
2151         else {
2152             signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2153             m_assembler.tst<64>(src, dataTempRegister);
2154         }
2155         m_assembler.cset<32>(dest, ARM64Condition(cond));
2156     }
2157
2158
2159     // Patchable operations
2160
2161     ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
2162     {
2163         DataLabel32 label(this);
2164         moveWithFixedWidth(imm, dest);
2165         return label;
2166     }
2167
2168     ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
2169     {
2170         DataLabelPtr label(this);
2171         moveWithFixedWidth(imm, dest);
2172         return label;
2173     }
2174
2175     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2176     {
2177         dataLabel = DataLabelPtr(this);
2178         moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2179         return branch64(cond, left, dataTempRegister);
2180     }
2181
2182     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2183     {
2184         dataLabel = DataLabelPtr(this);
2185         moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2186         return branch64(cond, left, dataTempRegister);
2187     }
2188
2189     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
2190     {
2191         m_makeJumpPatchable = true;
2192         Jump result = branch32(cond, left, TrustedImm32(right));
2193         m_makeJumpPatchable = false;
2194         return PatchableJump(result);
2195     }
2196
2197     PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2198     {
2199         m_makeJumpPatchable = true;
2200         Jump result = branchTest32(cond, reg, mask);
2201         m_makeJumpPatchable = false;
2202         return PatchableJump(result);
2203     }
2204
2205     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
2206     {
2207         m_makeJumpPatchable = true;
2208         Jump result = branch32(cond, reg, imm);
2209         m_makeJumpPatchable = false;
2210         return PatchableJump(result);
2211     }
2212
2213     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2214     {
2215         m_makeJumpPatchable = true;
2216         Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
2217         m_makeJumpPatchable = false;
2218         return PatchableJump(result);
2219     }
2220
2221     PatchableJump patchableJump()
2222     {
2223         m_makeJumpPatchable = true;
2224         Jump result = jump();
2225         m_makeJumpPatchable = false;
2226         return PatchableJump(result);
2227     }
2228
2229     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
2230     {
2231         DataLabelPtr label(this);
2232         moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
2233         store64(dataTempRegister, address);
2234         return label;
2235     }
2236
2237     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
2238     {
2239         return storePtrWithPatch(TrustedImmPtr(0), address);
2240     }
2241
2242     static void reemitInitialMoveWithPatch(void* address, void* value)
2243     {
2244         ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
2245     }
2246
2247     // Miscellaneous operations:
2248
2249     void breakpoint(uint16_t imm = 0)
2250     {
2251         m_assembler.brk(imm);
2252     }
2253
2254     void nop()
2255     {
2256         m_assembler.nop();
2257     }
2258     
2259     void memoryFence()
2260     {
2261         m_assembler.dmbSY();
2262     }
2263
2264
2265     // Misc helper functions.
2266
2267     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
2268     static RelationalCondition invert(RelationalCondition cond)
2269     {
2270         return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
2271     }
2272
2273     static FunctionPtr readCallTarget(CodeLocationCall call)
2274     {
2275         return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
2276     }
2277
2278     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
2279     {
2280         ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
2281     }
2282     
2283     static ptrdiff_t maxJumpReplacementSize()
2284     {
2285         return ARM64Assembler::maxJumpReplacementSize();
2286     }
2287
2288     RegisterID scratchRegisterForBlinding() { return getCachedDataTempRegisterIDAndInvalidate(); }
2289
2290     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2291     
2292     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
2293     {
2294         return label.labelAtOffset(0);
2295     }
2296     
2297     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2298     {
2299         UNREACHABLE_FOR_PLATFORM();
2300         return CodeLocationLabel();
2301     }
2302     
2303     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
2304     {
2305         reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
2306     }
2307     
2308     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2309     {
2310         UNREACHABLE_FOR_PLATFORM();
2311     }
2312
2313 protected:
2314     ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
2315     {
2316         m_assembler.b_cond(cond);
2317         AssemblerLabel label = m_assembler.label();
2318         m_assembler.nop();
2319         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
2320     }
2321     ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
2322     ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
2323     ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2324
2325     template <int dataSize>
2326     ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
2327     {
2328         if (cond == IsZero)
2329             m_assembler.cbz<dataSize>(reg);
2330         else
2331             m_assembler.cbnz<dataSize>(reg);
2332         AssemblerLabel label = m_assembler.label();
2333         m_assembler.nop();
2334         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
2335     }
2336
2337     ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
2338     {
2339         ASSERT(bit < 64);
2340         bit &= 0x3f;
2341         if (cond == IsZero)
2342             m_assembler.tbz(reg, bit);
2343         else
2344             m_assembler.tbnz(reg, bit);
2345         AssemblerLabel label = m_assembler.label();
2346         m_assembler.nop();
2347         return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
2348     }
2349
2350     ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
2351     {
2352         return static_cast<ARM64Assembler::Condition>(cond);
2353     }
2354
2355     ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
2356     {
2357         return static_cast<ARM64Assembler::Condition>(cond);
2358     }
2359
2360     ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
2361     {
2362         return static_cast<ARM64Assembler::Condition>(cond);
2363     }
2364     
2365 private:
2366     ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
2367     ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2368
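         // True if value is representable as an int32_t, i.e. a sign-extension
         // round-trip through the low 32 bits leaves it unchanged.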
2369     ALWAYS_INLINE bool isInIntRange(intptr_t value)
2370     {
2371         return value == ((value << 32) >> 32);
2372     }
2373
2374     template<typename ImmediateType, typename rawType>
2375     void moveInternal(ImmediateType imm, RegisterID dest)
2376     {
2377         const int dataSize = sizeof(rawType) * 8;
2378         const int numberHalfWords = dataSize / 16;
2379         rawType value = bitwise_cast<rawType>(imm.m_value);
2380         uint16_t halfword[numberHalfWords];
2381
2382         // Handle 0 and ~0 here to simplify code below
2383         if (!value) {
2384             m_assembler.movz<dataSize>(dest, 0);
2385             return;
2386         }
2387         if (!~value) {
2388             m_assembler.movn<dataSize>(dest, 0);
2389             return;
2390         }
2391
2392         LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
2393
2394         if (logicalImm.isValid()) {
2395             m_assembler.movi<dataSize>(dest, logicalImm);
2396             return;
2397         }
2398
2399         // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
2400         int zeroOrNegateVote = 0;
2401         for (int i = 0; i < numberHalfWords; ++i) {
2402             halfword[i] = getHalfword(value, i);
2403             if (!halfword[i])
2404                 zeroOrNegateVote++;
2405             else if (halfword[i] == 0xffff)
2406                 zeroOrNegateVote--;
2407         }
2408
2409         bool needToClearRegister = true;
2410         if (zeroOrNegateVote >= 0) {
2411             for (int i = 0; i < numberHalfWords; i++) {
2412                 if (halfword[i]) {
2413                     if (needToClearRegister) {
2414                         m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
2415                         needToClearRegister = false;
2416                     } else
2417                         m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2418                 }
2419             }
2420         } else {
2421             for (int i = 0; i < numberHalfWords; i++) {
2422                 if (halfword[i] != 0xffff) {
2423                     if (needToClearRegister) {
2424                         m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
2425                         needToClearRegister = false;
2426                     } else
2427                         m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
2428                 }
2429             }
2430         }
2431     }
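         // Worked example (illustrative): materializing 0x0000ffff00001234.
         // Its halfwords, low to high, are 0x1234, 0x0000, 0xffff, 0x0000;
         // two are zero and one is 0xffff, so the vote selects the movz
         // strategy and this emits:
         //     movz dest, #0x1234
         //     movk dest, #0xffff, lsl #32
         // Halfwords that are already zero need no movk at all.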
2432
2433     template<int datasize>
2434     ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2435     {
2436         m_assembler.ldr<datasize>(rt, rn, pimm);
2437     }
2438
2439     template<int datasize>
2440     ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2441     {
2442         m_assembler.ldur<datasize>(rt, rn, simm);
2443     }
2444
2445     template<int datasize>
2446     ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
2447     {
2448         m_assembler.str<datasize>(rt, rn, pimm);
2449     }
2450
2451     template<int datasize>
2452     ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
2453     {
2454         m_assembler.stur<datasize>(rt, rn, simm);
2455     }
2456
2457     void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
2458     {
2459         int32_t value = imm.m_value;
2460         m_assembler.movz<32>(dest, getHalfword(value, 0));
2461         m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2462     }
2463
2464     void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
2465     {
2466         intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
2467         m_assembler.movz<64>(dest, getHalfword(value, 0));
2468         m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
2469         m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
2470     }
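         // This always emits exactly three instructions (movz + two movk),
         // covering bits 0..47 of the pointer, which suffices for the 48-bit
         // address spaces in current use. Together with a following blr this
         // forms the fixed four-instruction sequence measured by
         // REPATCH_OFFSET_CALL_TO_POINTER.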
2471
2472     void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
2473     {
2474         if (value >= 0) {
2475             m_assembler.movz<32>(dest, getHalfword(value, 0));
2476             m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2477         } else {
2478             m_assembler.movn<32>(dest, ~getHalfword(value, 0));
2479             m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
2480         }
2481     }
2482
2483     void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2484     {
2485         move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2486     }
2487
2488     template<int datasize>
2489     ALWAYS_INLINE void load(const void* address, RegisterID dest)
2490     {
2491         intptr_t currentRegisterContents;
2492         if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2493             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2494             intptr_t addressDelta = addressAsInt - currentRegisterContents;
2495
2496             if (isInIntRange(addressDelta)) {
2497                 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2498                     m_assembler.ldur<datasize>(dest,  memoryTempRegister, addressDelta);
2499                     return;
2500                 }
2501
2502                 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2503                     m_assembler.ldr<datasize>(dest,  memoryTempRegister, addressDelta);
2504                     return;
2505                 }
2506             }
2507
2508             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2509                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2510                 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2511                 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2512                 return;
2513             }
2514         }
2515
2516         move(TrustedImmPtr(address), memoryTempRegister);
2517         m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2518         m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2519     }
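         // For example (illustrative; `globalA`/`globalB` are assumed names),
         // two successive absolute loads from nearby addresses reuse the
         // cached base:
         //
         //     load<64>(&globalA, dest1);  // materializes the address, then loads
         //     load<64>(&globalB, dest2);  // if the delta fits an ldur/ldr
         //                                 // offset, this is a single load
         //
         // avoiding a second movz/movk sequence for the full 64-bit address.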
2520
2521     template<int datasize>
2522     ALWAYS_INLINE void store(RegisterID src, const void* address)
2523     {
2524         intptr_t currentRegisterContents;
2525         if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2526             intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2527             intptr_t addressDelta = addressAsInt - currentRegisterContents;
2528
2529             if (isInIntRange(addressDelta)) {
2530                 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2531                     m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
2532                     return;
2533                 }
2534
2535                 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2536                     m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
2537                     return;
2538                 }
2539             }
2540
2541             if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2542                 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2543                 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2544                 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2545                 return;
2546             }
2547         }
2548
2549         move(TrustedImmPtr(address), memoryTempRegister);
2550         m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2551         m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
2552     }
2553
2554     template <int dataSize>
2555     ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
2556     {
2557         intptr_t currentRegisterContents;
2558         if (dest.value(currentRegisterContents)) {
2559             if (currentRegisterContents == immediate)
2560                 return true;
2561
2562             LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
2563
2564             if (logicalImm.isValid()) {
2565                 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
2566                 dest.setValue(immediate);
2567                 return true;
2568             }
2569
2570             if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
2571                 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
2572                     m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
2573
2574                 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
2575                     m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
2576
2577                 dest.setValue(immediate);
2578                 return true;
2579             }
2580         }
2581
2582         return false;
2583     }
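         // In short: a cache hit costs no instructions; an immediate encodable
         // as a logical immediate costs one; and an immediate sharing its
         // upper 32 bits with the cached value costs at most two movk
         // instructions to refresh the low halfwords.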
2584
2585     void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
2586     {
2587         if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
2588             return;
2589
2590         moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
2591         dest.setValue(imm.m_value);
2592     }
2593
2594     void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
2595     {
2596         if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
2597             return;
2598
2599         moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
2600         dest.setValue(imm.asIntptr());
2601     }
2602
2603     void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
2604     {
2605         if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
2606             return;
2607
2608         moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
2609         dest.setValue(imm.m_value);
2610     }
2611
2612     template<int datasize>
2613     ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2614     {
2615         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2616             loadUnscaledImmediate<datasize>(rt, rn, offset);
2617             return true;
2618         }
2619         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2620             loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2621             return true;
2622         }
2623         return false;
2624     }
2625
2626     template<int datasize>
2627     ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2628     {
2629         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2630             m_assembler.ldur<datasize>(rt, rn, offset);
2631             return true;
2632         }
2633         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2634             m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
2635             return true;
2636         }
2637         return false;
2638     }
2639
2640     template<int datasize>
2641     ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2642     {
2643         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2644             storeUnscaledImmediate<datasize>(rt, rn, offset);
2645             return true;
2646         }
2647         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2648             storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2649             return true;
2650         }
2651         return false;
2652     }
2653
2654     template<int datasize>
2655     ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2656     {
2657         if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2658             m_assembler.stur<datasize>(rt, rn, offset);
2659             return true;
2660         }
2661         if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2662             m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
2663             return true;
2664         }
2665         return false;
2666     }
2667
2668     friend class LinkBuffer;
2669     friend class RepatchBuffer;
2670
2671     static void linkCall(void* code, Call call, FunctionPtr function)
2672     {
2673         if (call.isFlagSet(Call::Near))
2674             ARM64Assembler::linkCall(code, call.m_label, function.value());
2675         else
2676             ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
2677     }
2678
2679     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2680     {
2681         ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2682     }
2683
2684     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2685     {
2686         ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2687     }
2688
2689     CachedTempRegister m_dataMemoryTempRegister;
2690     CachedTempRegister m_cachedMemoryTempRegister;
2691     bool m_makeJumpPatchable;
2692 };
2693
2694 // Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
2695 template<>
2696 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2697 {
2698     m_assembler.ldrb(rt, rn, pimm);
2699 }
2700
2701 template<>
2702 ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2703 {
2704     m_assembler.ldrh(rt, rn, pimm);
2705 }
2706
2707 template<>
2708 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2709 {
2710     m_assembler.ldurb(rt, rn, simm);
2711 }
2712
2713 template<>
2714 ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2715 {
2716     m_assembler.ldurh(rt, rn, simm);
2717 }
2718
2719 template<>
2720 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
2721 {
2722     m_assembler.strb(rt, rn, pimm);
2723 }
2724
2725 template<>
2726 ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
2727 {
2728     m_assembler.strh(rt, rn, pimm);
2729 }
2730
2731 template<>
2732 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
2733 {
2734     m_assembler.sturb(rt, rn, simm);
2735 }
2736
2737 template<>
2738 ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
2739 {
2740     m_assembler.sturh(rt, rn, simm);
2741 }
2742
2743 } // namespace JSC
2744
2745 #endif // ENABLE(ASSEMBLER)
2746
2747 #endif // MacroAssemblerARM64_h