1 /*
2  * Copyright (C) 2009-2010, 2014-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2010 University of Szeged
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25  */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
38     static const RegisterID dataTempRegister = ARMRegisters::ip;
39     static const RegisterID addressTempRegister = ARMRegisters::r6;
40
41     static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
42     inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
43
44 public:
45     static const unsigned numGPRs = 16;
46     static const unsigned numFPRs = 16;
47     
48     MacroAssemblerARMv7()
49         : m_makeJumpPatchable(false)
50     {
51     }
52
53     typedef ARMv7Assembler::LinkRecord LinkRecord;
54     typedef ARMv7Assembler::JumpType JumpType;
55     typedef ARMv7Assembler::JumpLinkType JumpLinkType;
56     typedef ARMv7Assembler::Condition Condition;
57
58     static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
59     static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
60
61     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
62     {
63         return value >= -255 && value <= 255;
64     }
65
66     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
67     void* unlinkedCode() { return m_assembler.unlinkedCode(); }
68     static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
69     static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
70     static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
71     static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
72     static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); }
73
74     struct ArmAddress {
75         enum AddressType {
76             HasOffset,
77             HasIndex,
78         } type;
79         RegisterID base;
80         union {
81             int32_t offset;
82             struct {
83                 RegisterID index;
84                 Scale scale;
85             };
86         } u;
87         
88         explicit ArmAddress(RegisterID base, int32_t offset = 0)
89             : type(HasOffset)
90             , base(base)
91         {
92             u.offset = offset;
93         }
94         
95         explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
96             : type(HasIndex)
97             , base(base)
98         {
99             u.index = index;
100             u.scale = scale;
101         }
102     };
103     
104 public:
105     static const Scale ScalePtr = TimesFour;
106
107     enum RelationalCondition {
108         Equal = ARMv7Assembler::ConditionEQ,
109         NotEqual = ARMv7Assembler::ConditionNE,
110         Above = ARMv7Assembler::ConditionHI,
111         AboveOrEqual = ARMv7Assembler::ConditionHS,
112         Below = ARMv7Assembler::ConditionLO,
113         BelowOrEqual = ARMv7Assembler::ConditionLS,
114         GreaterThan = ARMv7Assembler::ConditionGT,
115         GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
116         LessThan = ARMv7Assembler::ConditionLT,
117         LessThanOrEqual = ARMv7Assembler::ConditionLE
118     };
119
120     enum ResultCondition {
121         Overflow = ARMv7Assembler::ConditionVS,
122         Signed = ARMv7Assembler::ConditionMI,
123         PositiveOrZero = ARMv7Assembler::ConditionPL,
124         Zero = ARMv7Assembler::ConditionEQ,
125         NonZero = ARMv7Assembler::ConditionNE
126     };
127
128     enum DoubleCondition {
129         // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
130         DoubleEqual = ARMv7Assembler::ConditionEQ,
131         DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
132         DoubleGreaterThan = ARMv7Assembler::ConditionGT,
133         DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
134         DoubleLessThan = ARMv7Assembler::ConditionLO,
135         DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
136         // If either operand is NaN, these conditions always evaluate to true.
137         DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
138         DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
139         DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
140         DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
141         DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
142         DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
143     };
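    // (DoubleNotEqual and DoubleEqualOrUnordered intentionally hold stand-in condition
    // codes; branchDouble() below detects these two cases and emits a two-branch
    // sequence so that unordered (NaN) comparisons behave as documented above.)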
144
145     static const RegisterID stackPointerRegister = ARMRegisters::sp;
146     static const RegisterID framePointerRegister = ARMRegisters::fp;
147     static const RegisterID linkRegister = ARMRegisters::lr;
148
149     // Integer arithmetic operations:
150     //
151     // Operations are typically two operand - operation(source, srcDst).
152     // For many operations the source may be a TrustedImm32, and the srcDst operand
153     // may often be a memory location (explicitly described using an Address
154     // object).
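    //
    // Hypothetical usage sketch (illustrative only, not part of the original file;
    // assumes a MacroAssemblerARMv7 instance named "jit"):
    //     jit.add32(ARMRegisters::r1, ARMRegisters::r0);                   // r0 += r1
    //     jit.add32(TrustedImm32(4), ARMRegisters::r1, ARMRegisters::r0);  // r0 = r1 + 4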
155
156     void add32(RegisterID src, RegisterID dest)
157     {
158         m_assembler.add(dest, dest, src);
159     }
160
161     void add32(RegisterID left, RegisterID right, RegisterID dest)
162     {
163         m_assembler.add(dest, left, right);
164     }
165
166     void add32(TrustedImm32 imm, RegisterID dest)
167     {
168         add32(imm, dest, dest);
169     }
170     
171     void add32(AbsoluteAddress src, RegisterID dest)
172     {
173         load32(src.m_ptr, dataTempRegister);
174         add32(dataTempRegister, dest);
175     }
176
177     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
178     {
179         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
180
181         // For adds with the stack pointer as the destination, the src must first be
182         // moved into sp to avoid an unpredictable instruction encoding.
183         if (dest == ARMRegisters::sp && src != dest) {
184             move(src, ARMRegisters::sp);
185             src = ARMRegisters::sp;
186         }
187
188         if (armImm.isValid())
189             m_assembler.add(dest, src, armImm);
190         else {
191             move(imm, dataTempRegister);
192             m_assembler.add(dest, src, dataTempRegister);
193         }
194     }
195
196     void add32(TrustedImm32 imm, Address address)
197     {
198         load32(address, dataTempRegister);
199
200         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
201         if (armImm.isValid())
202             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
203         else {
204             // Hrrrm, since dataTempRegister holds the data loaded,
205             // use addressTempRegister to hold the immediate.
206             move(imm, addressTempRegister);
207             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
208         }
209
210         store32(dataTempRegister, address);
211     }
212
213     void add32(Address src, RegisterID dest)
214     {
215         load32(src, dataTempRegister);
216         add32(dataTempRegister, dest);
217     }
218
219     void add32(TrustedImm32 imm, AbsoluteAddress address)
220     {
221         load32(address.m_ptr, dataTempRegister);
222
223         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
224         if (armImm.isValid())
225             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
226         else {
227             // Hrrrm, since dataTempRegister holds the data loaded,
228             // use addressTempRegister to hold the immediate.
229             move(imm, addressTempRegister);
230             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
231         }
232
233         store32(dataTempRegister, address.m_ptr);
234     }
235
236     void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
237     {
238         add32(imm, srcDest);
239     }
240     
241     void add64(TrustedImm32 imm, AbsoluteAddress address)
242     {
243         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
244
245         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
246         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
247         if (armImm.isValid())
248             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
249         else {
250             move(imm, addressTempRegister);
251             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
252             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
253         }
254         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
255
256         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
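        // The high word is updated with add-with-carry; (imm.m_value >> 31) arithmetically
        // sign-extends the 32-bit addend to 0 or -1 so that negative values carry correctly.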
257         m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
258         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
259     }
260
261     void and32(RegisterID op1, RegisterID op2, RegisterID dest)
262     {
263         m_assembler.ARM_and(dest, op1, op2);
264     }
265
266     void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
267     {
268         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
269         if (armImm.isValid())
270             m_assembler.ARM_and(dest, src, armImm);
271         else {
272             move(imm, dataTempRegister);
273             m_assembler.ARM_and(dest, src, dataTempRegister);
274         }
275     }
276
277     void and32(RegisterID src, RegisterID dest)
278     {
279         and32(dest, src, dest);
280     }
281
282     void and32(TrustedImm32 imm, RegisterID dest)
283     {
284         and32(imm, dest, dest);
285     }
286
287     void and32(Address src, RegisterID dest)
288     {
289         load32(src, dataTempRegister);
290         and32(dataTempRegister, dest);
291     }
292
293     void countLeadingZeros32(RegisterID src, RegisterID dest)
294     {
295         m_assembler.clz(dest, src);
296     }
297
298     void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
299     {
300         // Clamp the shift to the range 0..31
301         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
302         ASSERT(armImm.isValid());
303         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
304
305         m_assembler.lsl(dest, src, dataTempRegister);
306     }
307
308     void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
309     {
310         m_assembler.lsl(dest, src, imm.m_value & 0x1f);
311     }
312
313     void lshift32(RegisterID shiftAmount, RegisterID dest)
314     {
315         lshift32(dest, shiftAmount, dest);
316     }
317
318     void lshift32(TrustedImm32 imm, RegisterID dest)
319     {
320         lshift32(dest, imm, dest);
321     }
322
323     void mul32(RegisterID src, RegisterID dest)
324     {
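        // smull computes a 64-bit product; the low 32 bits go to dest and the high
        // 32 bits are discarded into dataTempRegister.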
325         m_assembler.smull(dest, dataTempRegister, dest, src);
326     }
327
328     void mul32(RegisterID left, RegisterID right, RegisterID dest)
329     {
330         m_assembler.smull(dest, dataTempRegister, left, right);
331     }
332
333     void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
334     {
335         move(imm, dataTempRegister);
336         m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
337     }
338
339     void neg32(RegisterID srcDest)
340     {
341         m_assembler.neg(srcDest, srcDest);
342     }
343
344     void or32(RegisterID src, RegisterID dest)
345     {
346         m_assembler.orr(dest, dest, src);
347     }
348     
349     void or32(RegisterID src, AbsoluteAddress dest)
350     {
351         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
352         load32(addressTempRegister, dataTempRegister);
353         or32(src, dataTempRegister);
354         store32(dataTempRegister, addressTempRegister);
355     }
356
357     void or32(TrustedImm32 imm, AbsoluteAddress address)
358     {
359         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
360         if (armImm.isValid()) {
361             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
362             load32(addressTempRegister, dataTempRegister);
363             m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
364             store32(dataTempRegister, addressTempRegister);
365         } else {
366             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
367             load32(addressTempRegister, dataTempRegister);
368             move(imm, addressTempRegister);
369             m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
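            // Loading the immediate clobbered addressTempRegister, so the address is
            // re-materialized below before the store.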
370             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
371             store32(dataTempRegister, addressTempRegister);
372         }
373     }
374
375     void or32(TrustedImm32 imm, Address address)
376     {
377         load32(address, dataTempRegister);
378         or32(imm, dataTempRegister, dataTempRegister);
379         store32(dataTempRegister, address);
380     }
381
382     void or32(TrustedImm32 imm, RegisterID dest)
383     {
384         or32(imm, dest, dest);
385     }
386
387     void or32(RegisterID op1, RegisterID op2, RegisterID dest)
388     {
389         m_assembler.orr(dest, op1, op2);
390     }
391
392     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
393     {
394         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
395         if (armImm.isValid())
396             m_assembler.orr(dest, src, armImm);
397         else {
398             ASSERT(src != dataTempRegister);
399             move(imm, dataTempRegister);
400             m_assembler.orr(dest, src, dataTempRegister);
401         }
402     }
403
404     void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
405     {
406         // Clamp the shift to the range 0..31
407         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
408         ASSERT(armImm.isValid());
409         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
410
411         m_assembler.asr(dest, src, dataTempRegister);
412     }
413
414     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
415     {
416         if (!imm.m_value)
417             move(src, dest);
418         else
419             m_assembler.asr(dest, src, imm.m_value & 0x1f);
420     }
421
422     void rshift32(RegisterID shiftAmount, RegisterID dest)
423     {
424         rshift32(dest, shiftAmount, dest);
425     }
426     
427     void rshift32(TrustedImm32 imm, RegisterID dest)
428     {
429         rshift32(dest, imm, dest);
430     }
431
432     void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
433     {
434         // Clamp the shift to the range 0..31
435         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
436         ASSERT(armImm.isValid());
437         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
438         
439         m_assembler.lsr(dest, src, dataTempRegister);
440     }
441     
442     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
443     {
444         if (!imm.m_value)
445             move(src, dest);
446         else
447             m_assembler.lsr(dest, src, imm.m_value & 0x1f);
448     }
449
450     void urshift32(RegisterID shiftAmount, RegisterID dest)
451     {
452         urshift32(dest, shiftAmount, dest);
453     }
454     
455     void urshift32(TrustedImm32 imm, RegisterID dest)
456     {
457         urshift32(dest, imm, dest);
458     }
459
460     void sub32(RegisterID src, RegisterID dest)
461     {
462         m_assembler.sub(dest, dest, src);
463     }
464
465     void sub32(TrustedImm32 imm, RegisterID dest)
466     {
467         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
468         if (armImm.isValid())
469             m_assembler.sub(dest, dest, armImm);
470         else {
471             move(imm, dataTempRegister);
472             m_assembler.sub(dest, dest, dataTempRegister);
473         }
474     }
475
476     void sub32(TrustedImm32 imm, Address address)
477     {
478         load32(address, dataTempRegister);
479
480         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
481         if (armImm.isValid())
482             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
483         else {
484             // Hrrrm, since dataTempRegister holds the data loaded,
485             // use addressTempRegister to hold the immediate.
486             move(imm, addressTempRegister);
487             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
488         }
489
490         store32(dataTempRegister, address);
491     }
492
493     void sub32(Address src, RegisterID dest)
494     {
495         load32(src, dataTempRegister);
496         sub32(dataTempRegister, dest);
497     }
498
499     void sub32(TrustedImm32 imm, AbsoluteAddress address)
500     {
501         load32(address.m_ptr, dataTempRegister);
502
503         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
504         if (armImm.isValid())
505             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
506         else {
507             // Hrrrm, since dataTempRegister holds the data loaded,
508             // use addressTempRegister to hold the immediate.
509             move(imm, addressTempRegister);
510             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
511         }
512
513         store32(dataTempRegister, address.m_ptr);
514     }
515
516     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
517     {
518         m_assembler.eor(dest, op1, op2);
519     }
520
521     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
522     {
523         if (imm.m_value == -1) {
524             m_assembler.mvn(dest, src);
525             return;
526         }
527
528         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
529         if (armImm.isValid())
530             m_assembler.eor(dest, src, armImm);
531         else {
532             move(imm, dataTempRegister);
533             m_assembler.eor(dest, src, dataTempRegister);
534         }
535     }
536
537     void xor32(RegisterID src, RegisterID dest)
538     {
539         xor32(dest, src, dest);
540     }
541
542     void xor32(TrustedImm32 imm, RegisterID dest)
543     {
544         if (imm.m_value == -1)
545             m_assembler.mvn(dest, dest);
546         else
547             xor32(imm, dest, dest);
548     }
549     
550
551     // Memory access operations:
552     //
553     // Loads are of the form load(address, destination) and stores of the form
554     // store(source, address).  The source for a store may be a TrustedImm32.  Address
555     // operands to loads and stores will be implicitly constructed if a
556     // register is passed.
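    //
    // Hypothetical usage sketch (illustrative only, not part of the original file;
    // assumes a MacroAssemblerARMv7 instance named "jit"):
    //     jit.load32(Address(ARMRegisters::r1, 8), ARMRegisters::r0);   // r0 = *(r1 + 8)
    //     jit.store32(ARMRegisters::r0, Address(ARMRegisters::r1, 8));  // *(r1 + 8) = r0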
557
558 private:
559     void load32(ArmAddress address, RegisterID dest)
560     {
561         if (address.type == ArmAddress::HasIndex)
562             m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
563         else if (address.u.offset >= 0) {
564             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
565             ASSERT(armImm.isValid());
566             m_assembler.ldr(dest, address.base, armImm);
567         } else {
568             ASSERT(address.u.offset >= -255);
569             m_assembler.ldr(dest, address.base, address.u.offset, true, false);
570         }
571     }
572
573     void load16(ArmAddress address, RegisterID dest)
574     {
575         if (address.type == ArmAddress::HasIndex)
576             m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
577         else if (address.u.offset >= 0) {
578             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
579             ASSERT(armImm.isValid());
580             m_assembler.ldrh(dest, address.base, armImm);
581         } else {
582             ASSERT(address.u.offset >= -255);
583             m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
584         }
585     }
586     
587     void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
588     {
589         ASSERT(address.type == ArmAddress::HasIndex);
590         m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
591     }
592
593     void load8(ArmAddress address, RegisterID dest)
594     {
595         if (address.type == ArmAddress::HasIndex)
596             m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
597         else if (address.u.offset >= 0) {
598             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
599             ASSERT(armImm.isValid());
600             m_assembler.ldrb(dest, address.base, armImm);
601         } else {
602             ASSERT(address.u.offset >= -255);
603             m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
604         }
605     }
606     
607     void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
608     {
609         ASSERT(address.type == ArmAddress::HasIndex);
610         m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
611     }
612
613 protected:
614     void store32(RegisterID src, ArmAddress address)
615     {
616         if (address.type == ArmAddress::HasIndex)
617             m_assembler.str(src, address.base, address.u.index, address.u.scale);
618         else if (address.u.offset >= 0) {
619             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
620             ASSERT(armImm.isValid());
621             m_assembler.str(src, address.base, armImm);
622         } else {
623             ASSERT(address.u.offset >= -255);
624             m_assembler.str(src, address.base, address.u.offset, true, false);
625         }
626     }
627
628 private:
629     void store8(RegisterID src, ArmAddress address)
630     {
631         if (address.type == ArmAddress::HasIndex)
632             m_assembler.strb(src, address.base, address.u.index, address.u.scale);
633         else if (address.u.offset >= 0) {
634             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
635             ASSERT(armImm.isValid());
636             m_assembler.strb(src, address.base, armImm);
637         } else {
638             ASSERT(address.u.offset >= -255);
639             m_assembler.strb(src, address.base, address.u.offset, true, false);
640         }
641     }
642     
643     void store16(RegisterID src, ArmAddress address)
644     {
645         if (address.type == ArmAddress::HasIndex)
646             m_assembler.strh(src, address.base, address.u.index, address.u.scale);
647         else if (address.u.offset >= 0) {
648             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
649             ASSERT(armImm.isValid());
650             m_assembler.strh(src, address.base, armImm);
651         } else {
652             ASSERT(address.u.offset >= -255);
653             m_assembler.strh(src, address.base, address.u.offset, true, false);
654         }
655     }
656
657 public:
658     void load32(ImplicitAddress address, RegisterID dest)
659     {
660         load32(setupArmAddress(address), dest);
661     }
662
663     void load32(BaseIndex address, RegisterID dest)
664     {
665         load32(setupArmAddress(address), dest);
666     }
667
668     void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
669     {
670         load32(setupArmAddress(address), dest);
671     }
672
673     void load16Unaligned(BaseIndex address, RegisterID dest)
674     {
675         load16(setupArmAddress(address), dest);
676     }
677
678     void load32(const void* address, RegisterID dest)
679     {
680         move(TrustedImmPtr(address), addressTempRegister);
681         m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
682     }
683     
684     void abortWithReason(AbortReason reason)
685     {
686         move(TrustedImm32(reason), dataTempRegister);
687         breakpoint();
688     }
689
690     void abortWithReason(AbortReason reason, intptr_t misc)
691     {
692         move(TrustedImm32(misc), addressTempRegister);
693         abortWithReason(reason);
694     }
695
696     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
697     {
698         ConvertibleLoadLabel result(this);
699         ASSERT(address.offset >= 0 && address.offset <= 255);
700         m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
701         return result;
702     }
703
704     void load8(ImplicitAddress address, RegisterID dest)
705     {
706         load8(setupArmAddress(address), dest);
707     }
708
709     void load8SignedExtendTo32(ImplicitAddress, RegisterID)
710     {
711         UNREACHABLE_FOR_PLATFORM();
712     }
713
714     void load8(BaseIndex address, RegisterID dest)
715     {
716         load8(setupArmAddress(address), dest);
717     }
718     
719     void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
720     {
721         load8SignedExtendTo32(setupArmAddress(address), dest);
722     }
723
724     void load8(const void* address, RegisterID dest)
725     {
726         move(TrustedImmPtr(address), dest);
727         load8(dest, dest);
728     }
729
730     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
731     {
732         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
733         load32(ArmAddress(address.base, dataTempRegister), dest);
734         return label;
735     }
736     
737     DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
738     {
739         padBeforePatch();
740
741         RegisterID base = address.base;
742         
743         DataLabelCompact label(this);
744         ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
745
746         m_assembler.ldr(dest, base, address.offset, true, false);
747         return label;
748     }
749
750     void load16(BaseIndex address, RegisterID dest)
751     {
752         m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
753     }
754     
755     void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
756     {
757         load16SignedExtendTo32(setupArmAddress(address), dest);
758     }
759     
760     void load16(ImplicitAddress address, RegisterID dest)
761     {
762         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
763         if (armImm.isValid())
764             m_assembler.ldrh(dest, address.base, armImm);
765         else {
766             move(TrustedImm32(address.offset), dataTempRegister);
767             m_assembler.ldrh(dest, address.base, dataTempRegister);
768         }
769     }
770     
771     void load16SignedExtendTo32(ImplicitAddress, RegisterID)
772     {
773         UNREACHABLE_FOR_PLATFORM();
774     }
775
776     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
777     {
778         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
779         store32(src, ArmAddress(address.base, dataTempRegister));
780         return label;
781     }
782
783     void store32(RegisterID src, ImplicitAddress address)
784     {
785         store32(src, setupArmAddress(address));
786     }
787
788     void store32(RegisterID src, BaseIndex address)
789     {
790         store32(src, setupArmAddress(address));
791     }
792
793     void store32(TrustedImm32 imm, ImplicitAddress address)
794     {
795         move(imm, dataTempRegister);
796         store32(dataTempRegister, setupArmAddress(address));
797     }
798
799     void store32(TrustedImm32 imm, BaseIndex address)
800     {
801         move(imm, dataTempRegister);
802         store32(dataTempRegister, setupArmAddress(address));
803     }
804
805     void store32(RegisterID src, const void* address)
806     {
807         move(TrustedImmPtr(address), addressTempRegister);
808         m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
809     }
810
811     void store32(TrustedImm32 imm, const void* address)
812     {
813         move(imm, dataTempRegister);
814         store32(dataTempRegister, address);
815     }
816
817     void store8(RegisterID src, Address address)
818     {
819         store8(src, setupArmAddress(address));
820     }
821     
822     void store8(RegisterID src, BaseIndex address)
823     {
824         store8(src, setupArmAddress(address));
825     }
826     
827     void store8(RegisterID src, void* address)
828     {
829         move(TrustedImmPtr(address), addressTempRegister);
830         store8(src, ArmAddress(addressTempRegister, 0));
831     }
832     
833     void store8(TrustedImm32 imm, void* address)
834     {
835         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
836         move(imm8, dataTempRegister);
837         store8(dataTempRegister, address);
838     }
839     
840     void store8(TrustedImm32 imm, Address address)
841     {
842         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
843         move(imm8, dataTempRegister);
844         store8(dataTempRegister, address);
845     }
846     
847     void store16(RegisterID src, BaseIndex address)
848     {
849         store16(src, setupArmAddress(address));
850     }
851
852     // Possibly clobbers src, but not on this architecture.
853     void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
854     {
855         m_assembler.vmov(dest1, dest2, src);
856     }
857     
858     void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
859     {
860         UNUSED_PARAM(scratch);
861         m_assembler.vmov(dest, src1, src2);
862     }
863
864     static bool shouldBlindForSpecificArch(uint32_t value)
865     {
866         ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
867
868         // Couldn't be encoded as an immediate, so assume it's untrusted.
869         if (!immediate.isValid())
870             return true;
871         
872         // If we can encode the immediate, we have less than 16 attacker
873         // controlled bits.
874         if (immediate.isEncodedImm())
875             return false;
876
877         // Don't let any more than 12 bits of an instruction word
878         // be controlled by an attacker.
879         return !immediate.isUInt12();
880     }
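    // Illustrative values (assumed, not from the original source): 0x000000ff and
    // 0x00ff00ff are representable as Thumb-2 encoded immediates and so are not blinded,
    // whereas a constant such as 0x12345678 cannot be encoded and is therefore blinded.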
881
882     // Floating-point operations:
883
884     static bool supportsFloatingPoint() { return true; }
885     static bool supportsFloatingPointTruncate() { return true; }
886     static bool supportsFloatingPointSqrt() { return true; }
887     static bool supportsFloatingPointAbs() { return true; }
888     static bool supportsFloatingPointRounding() { return false; }
889
890     void loadDouble(ImplicitAddress address, FPRegisterID dest)
891     {
892         RegisterID base = address.base;
893         int32_t offset = address.offset;
894
895         // ARM VFP addresses take an 8-bit immediate offset scaled by 4 (with a separate add/subtract bit), i.e. word-aligned offsets in the range +/-1020.
896         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
897             add32(TrustedImm32(offset), base, addressTempRegister);
898             base = addressTempRegister;
899             offset = 0;
900         }
901         
902         m_assembler.vldr(dest, base, offset);
903     }
904
905     void loadFloat(ImplicitAddress address, FPRegisterID dest)
906     {
907         RegisterID base = address.base;
908         int32_t offset = address.offset;
909
910         // ARM VFP addresses take an 8-bit immediate offset scaled by 4 (with a separate add/subtract bit), i.e. word-aligned offsets in the range +/-1020.
911         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
912             add32(TrustedImm32(offset), base, addressTempRegister);
913             base = addressTempRegister;
914             offset = 0;
915         }
916         
917         m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
918     }
919
920     void loadDouble(BaseIndex address, FPRegisterID dest)
921     {
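        // VLDR has no base-plus-index addressing mode, so the base, index and scale are
        // folded into addressTempRegister by hand before loading.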
922         move(address.index, addressTempRegister);
923         lshift32(TrustedImm32(address.scale), addressTempRegister);
924         add32(address.base, addressTempRegister);
925         loadDouble(Address(addressTempRegister, address.offset), dest);
926     }
927     
928     void loadFloat(BaseIndex address, FPRegisterID dest)
929     {
930         move(address.index, addressTempRegister);
931         lshift32(TrustedImm32(address.scale), addressTempRegister);
932         add32(address.base, addressTempRegister);
933         loadFloat(Address(addressTempRegister, address.offset), dest);
934     }
935
936     void moveDouble(FPRegisterID src, FPRegisterID dest)
937     {
938         if (src != dest)
939             m_assembler.vmov(dest, src);
940     }
941
942     void moveZeroToDouble(FPRegisterID reg)
943     {
944         static double zeroConstant = 0.;
945         loadDouble(TrustedImmPtr(&zeroConstant), reg);
946     }
947
948     void loadDouble(TrustedImmPtr address, FPRegisterID dest)
949     {
950         move(address, addressTempRegister);
951         m_assembler.vldr(dest, addressTempRegister, 0);
952     }
953
954     void storeDouble(FPRegisterID src, ImplicitAddress address)
955     {
956         RegisterID base = address.base;
957         int32_t offset = address.offset;
958
959         // ARM VFP addresses take an 8-bit immediate offset scaled by 4 (with a separate add/subtract bit), i.e. word-aligned offsets in the range +/-1020.
960         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
961             add32(TrustedImm32(offset), base, addressTempRegister);
962             base = addressTempRegister;
963             offset = 0;
964         }
965         
966         m_assembler.vstr(src, base, offset);
967     }
968
969     void storeFloat(FPRegisterID src, ImplicitAddress address)
970     {
971         RegisterID base = address.base;
972         int32_t offset = address.offset;
973
974         // ARM VFP addresses take an 8-bit immediate offset scaled by 4 (with a separate add/subtract bit), i.e. word-aligned offsets in the range +/-1020.
975         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
976             add32(TrustedImm32(offset), base, addressTempRegister);
977             base = addressTempRegister;
978             offset = 0;
979         }
980         
981         m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
982     }
983
984     void storeDouble(FPRegisterID src, TrustedImmPtr address)
985     {
986         move(address, addressTempRegister);
987         storeDouble(src, addressTempRegister);
988     }
989
990     void storeDouble(FPRegisterID src, BaseIndex address)
991     {
992         move(address.index, addressTempRegister);
993         lshift32(TrustedImm32(address.scale), addressTempRegister);
994         add32(address.base, addressTempRegister);
995         storeDouble(src, Address(addressTempRegister, address.offset));
996     }
997     
998     void storeFloat(FPRegisterID src, BaseIndex address)
999     {
1000         move(address.index, addressTempRegister);
1001         lshift32(TrustedImm32(address.scale), addressTempRegister);
1002         add32(address.base, addressTempRegister);
1003         storeFloat(src, Address(addressTempRegister, address.offset));
1004     }
1005     
1006     void addDouble(FPRegisterID src, FPRegisterID dest)
1007     {
1008         m_assembler.vadd(dest, dest, src);
1009     }
1010
1011     void addDouble(Address src, FPRegisterID dest)
1012     {
1013         loadDouble(src, fpTempRegister);
1014         addDouble(fpTempRegister, dest);
1015     }
1016
1017     void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1018     {
1019         m_assembler.vadd(dest, op1, op2);
1020     }
1021
1022     void addDouble(AbsoluteAddress address, FPRegisterID dest)
1023     {
1024         loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
1025         m_assembler.vadd(dest, dest, fpTempRegister);
1026     }
1027
1028     void divDouble(FPRegisterID src, FPRegisterID dest)
1029     {
1030         m_assembler.vdiv(dest, dest, src);
1031     }
1032
1033     void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1034     {
1035         m_assembler.vdiv(dest, op1, op2);
1036     }
1037
1038     void subDouble(FPRegisterID src, FPRegisterID dest)
1039     {
1040         m_assembler.vsub(dest, dest, src);
1041     }
1042
1043     void subDouble(Address src, FPRegisterID dest)
1044     {
1045         loadDouble(src, fpTempRegister);
1046         subDouble(fpTempRegister, dest);
1047     }
1048
1049     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1050     {
1051         m_assembler.vsub(dest, op1, op2);
1052     }
1053
1054     void mulDouble(FPRegisterID src, FPRegisterID dest)
1055     {
1056         m_assembler.vmul(dest, dest, src);
1057     }
1058
1059     void mulDouble(Address src, FPRegisterID dest)
1060     {
1061         loadDouble(src, fpTempRegister);
1062         mulDouble(fpTempRegister, dest);
1063     }
1064
1065     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1066     {
1067         m_assembler.vmul(dest, op1, op2);
1068     }
1069
1070     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1071     {
1072         m_assembler.vsqrt(dest, src);
1073     }
1074     
1075     void absDouble(FPRegisterID src, FPRegisterID dest)
1076     {
1077         m_assembler.vabs(dest, src);
1078     }
1079
1080     void negateDouble(FPRegisterID src, FPRegisterID dest)
1081     {
1082         m_assembler.vneg(dest, src);
1083     }
1084
1085     NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
1086     {
1087         ASSERT(!supportsFloatingPointRounding());
1088         CRASH();
1089     }
1090
1091     NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
1092     {
1093         ASSERT(!supportsFloatingPointRounding());
1094         CRASH();
1095     }
1096
1097     NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
1098     {
1099         ASSERT(!supportsFloatingPointRounding());
1100         CRASH();
1101     }
1102
1103     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
1104     {
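        // vmov transfers the GPR value into fpTempRegister; vcvt then converts its low
        // single-precision lane from a signed 32-bit integer to a double in dest.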
1105         m_assembler.vmov(fpTempRegister, src, src);
1106         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1107     }
1108
1109     void convertInt32ToDouble(Address address, FPRegisterID dest)
1110     {
1111         // FIXME: Load directly into the FPR.
1112         load32(address, dataTempRegister);
1113         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1114         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1115     }
1116
1117     void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1118     {
1119         // FIXME: Load directly into the FPR.
1120         load32(address.m_ptr, dataTempRegister);
1121         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1122         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1123     }
1124     
1125     void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
1126     {
1127         m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
1128     }
1129     
1130     void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
1131     {
1132         m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
1133     }
1134
1135     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1136     {
1137         m_assembler.vcmp(left, right);
1138         m_assembler.vmrs();
1139
1140         if (cond == DoubleNotEqual) {
1141             // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
1142             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1143             Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1144             unordered.link(this);
1145             return result;
1146         }
1147         if (cond == DoubleEqualOrUnordered) {
1148             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1149             Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1150             unordered.link(this);
1151             // We get here if either unordered or equal.
1152             Jump result = jump();
1153             notEqual.link(this);
1154             return result;
1155         }
1156         return makeBranch(cond);
1157     }
1158
1159     enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1160     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1161     {
1162         // Convert into dest.
1163         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1164         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1165
1166         // Calculate 2x dest.  If the value potentially underflowed, it will have been
1167         // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
1168         // overflow the result will be equal to -2.
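        // (For example, a saturated result of 0x80000000 doubles to zero, while
        // 0x7fffffff doubles to 0xfffffffe, i.e. -2.)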
1169         Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
1170         Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
1171
1172         // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
1173         underflow.link(this);
1174         if (branchType == BranchIfTruncateSuccessful)
1175             return noOverflow;
1176
1177         // We'll reach the current point in the code on failure, so plant a
1178         // jump here & link the success case.
1179         Jump failure = jump();
1180         noOverflow.link(this);
1181         return failure;
1182     }
1183
1184     // Result is undefined if the value is outside of the integer range.
1185     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1186     {
1187         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1188         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1189     }
1190
1191     void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1192     {
1193         m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
1194         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1195     }
1196     
1197     // Convert 'src' to an integer, placing the result in 'dest'.
1198     // If the result is not representable as a 32 bit value, branch.
1199     // May also branch for some values that are representable in 32 bits
1200     // (specifically, in this case, 0).
1201     void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
1202     {
1203         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1204         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1205
1206         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1207         m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
1208         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1209
1210         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
1211         if (negZeroCheck)
1212             failureCases.append(branchTest32(Zero, dest));
1213     }
1214
1215     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1216     {
1217         m_assembler.vcmpz(reg);
1218         m_assembler.vmrs();
1219         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1220         Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1221         unordered.link(this);
1222         return result;
1223     }
1224
1225     Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1226     {
1227         m_assembler.vcmpz(reg);
1228         m_assembler.vmrs();
1229         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1230         Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1231         unordered.link(this);
1232         // We get here if either unordered or equal.
1233         Jump result = jump();
1234         notEqual.link(this);
1235         return result;
1236     }
1237
1238     // Stack manipulation operations:
1239     //
1240     // The ABI is assumed to provide a stack abstraction to memory,
1241     // containing machine word sized units of data.  Push and pop
1242     // operations add and remove a single register sized unit of data
1243     // to or from the stack.  Peek and poke operations read or write
1244     // values on the stack, without moving the current stack position.
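    //
    // Hypothetical usage sketch (illustrative only, not part of the original file;
    // assumes a MacroAssemblerARMv7 instance named "jit"):
    //     jit.push(ARMRegisters::r0);   // sp -= 4; *sp = r0
    //     jit.pop(ARMRegisters::r1);    // r1 = *sp; sp += 4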
1245     
1246     void pop(RegisterID dest)
1247     {
1248         m_assembler.pop(dest);
1249     }
1250
1251     void push(RegisterID src)
1252     {
1253         m_assembler.push(src);
1254     }
1255
1256     void push(Address address)
1257     {
1258         load32(address, dataTempRegister);
1259         push(dataTempRegister);
1260     }
1261
1262     void push(TrustedImm32 imm)
1263     {
1264         move(imm, dataTempRegister);
1265         push(dataTempRegister);
1266     }
1267
1268     void popPair(RegisterID dest1, RegisterID dest2)
1269     {
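        // Build a register-list bitmask so that both registers are popped by a single
        // instruction.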
1270         m_assembler.pop(1 << dest1 | 1 << dest2);
1271     }
1272     
1273     void pushPair(RegisterID src1, RegisterID src2)
1274     {
1275         m_assembler.push(1 << src1 | 1 << src2);
1276     }
1277     
1278     // Register move operations:
1279     //
1280     // Move values in registers.
1281
1282     void move(TrustedImm32 imm, RegisterID dest)
1283     {
1284         uint32_t value = imm.m_value;
1285
1286         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
1287
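        // Materialization strategy: prefer a single MOV of a Thumb-2 encoded immediate,
        // then MVN of the bitwise inverse, and otherwise fall back to MOVW (plus MOVT
        // when the upper halfword is non-zero).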
1288         if (armImm.isValid())
1289             m_assembler.mov(dest, armImm);
1290         else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
1291             m_assembler.mvn(dest, armImm);
1292         else {
1293             m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
1294             if (value & 0xffff0000)
1295                 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
1296         }
1297     }
1298
1299     void move(RegisterID src, RegisterID dest)
1300     {
1301         if (src != dest)
1302             m_assembler.mov(dest, src);
1303     }
1304
1305     void move(TrustedImmPtr imm, RegisterID dest)
1306     {
1307         move(TrustedImm32(imm), dest);
1308     }
1309
1310     void swap(RegisterID reg1, RegisterID reg2)
1311     {
1312         move(reg1, dataTempRegister);
1313         move(reg2, reg1);
1314         move(dataTempRegister, reg2);
1315     }
1316
1317     void signExtend32ToPtr(RegisterID src, RegisterID dest)
1318     {
1319         move(src, dest);
1320     }
1321
1322     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1323     {
1324         move(src, dest);
1325     }
1326
1327     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
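    // (ARM condition codes are defined in true/false pairs that differ only in the
    // least-significant bit, so toggling that bit inverts the condition.)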
1328     static RelationalCondition invert(RelationalCondition cond)
1329     {
1330         return static_cast<RelationalCondition>(cond ^ 1);
1331     }
1332
1333     void nop()
1334     {
1335         m_assembler.nop();
1336     }
1337     
1338     void memoryFence()
1339     {
1340         m_assembler.dmbSY();
1341     }
1342     
1343     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
1344     {
1345         ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
1346     }
1347     
1348     static ptrdiff_t maxJumpReplacementSize()
1349     {
1350         return ARMv7Assembler::maxJumpReplacementSize();
1351     }
1352
1353     // Forwards / external control flow operations:
1354     //
1355     // This set of jump and conditional branch operations returns a Jump
1356     // object which may be linked at a later point, allowing forward jumps,
1357     // or jumps that will require external linkage (after the code has been
1358     // relocated).
1359     //
1360     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1361     // respectively; for unsigned comparisons the names b, a, be, and ae are
1362     // used (representing the names 'below' and 'above').
1363     //
1364     // Operands to the comparison are provided in the expected order, e.g.
1365     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1366     // treated as a signed 32bit value, is less than or equal to 5.
1367     //
1368     // jz and jnz test whether the first operand is equal to zero, and take
1369     // an optional second operand of a mask under which to perform the test.
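    //
    // Hypothetical usage sketch (illustrative only, not part of the original file;
    // assumes a MacroAssemblerARMv7 instance named "jit"):
    //     Jump isZero = jit.branchTest32(Zero, ARMRegisters::r0);
    //     // ... code executed when r0 is non-zero ...
    //     isZero.link(&jit);   // bind the forward branch to the current location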
1370 private:
1371
1372     // Should we be using TEQ for equal/not-equal?
1373     void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
1374     {
1375         int32_t imm = right.m_value;
1376         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1377         if (armImm.isValid())
1378             m_assembler.cmp(left, armImm);
1379         else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
1380             m_assembler.cmn(left, armImm);
1381         else {
1382             move(TrustedImm32(imm), dataTempRegister);
1383             m_assembler.cmp(left, dataTempRegister);
1384         }
1385     }
1386
1387 public:
1388     void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1389     {
1390         int32_t imm = mask.m_value;
1391
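        // Note: the stack pointer is never used directly as a TST operand below; it is
        // first copied into addressTempRegister.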
1392         if (imm == -1)
1393             m_assembler.tst(reg, reg);
1394         else {
1395             ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1396             if (armImm.isValid()) {
1397                 if (reg == ARMRegisters::sp) {
1398                     move(reg, addressTempRegister);
1399                     m_assembler.tst(addressTempRegister, armImm);
1400                 } else
1401                     m_assembler.tst(reg, armImm);
1402             } else {
1403                 move(mask, dataTempRegister);
1404                 if (reg == ARMRegisters::sp) {
1405                     move(reg, addressTempRegister);
1406                     m_assembler.tst(addressTempRegister, dataTempRegister);
1407                 } else
1408                     m_assembler.tst(reg, dataTempRegister);
1409             }
1410         }
1411     }
1412     
1413     Jump branch(ResultCondition cond)
1414     {
1415         return Jump(makeBranch(cond));
1416     }
1417
1418     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1419     {
1420         m_assembler.cmp(left, right);
1421         return Jump(makeBranch(cond));
1422     }
1423
1424     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1425     {
1426         compare32AndSetFlags(left, right);
1427         return Jump(makeBranch(cond));
1428     }
1429
1430     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1431     {
1432         load32(right, dataTempRegister);
1433         return branch32(cond, left, dataTempRegister);
1434     }
1435
1436     Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1437     {
1438         load32(left, dataTempRegister);
1439         return branch32(cond, dataTempRegister, right);
1440     }
1441
1442     Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1443     {
1444         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1445         load32(left, addressTempRegister);
1446         return branch32(cond, addressTempRegister, right);
1447     }
1448
1449     Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1450     {
1451         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1452         load32(left, addressTempRegister);
1453         return branch32(cond, addressTempRegister, right);
1454     }
1455
1456     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1457     {
1458         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1459         load32WithUnalignedHalfWords(left, addressTempRegister);
1460         return branch32(cond, addressTempRegister, right);
1461     }
1462
1463     Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1464     {
1465         load32(left.m_ptr, dataTempRegister);
1466         return branch32(cond, dataTempRegister, right);
1467     }
1468
1469     Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1470     {
1471         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1472         load32(left.m_ptr, addressTempRegister);
1473         return branch32(cond, addressTempRegister, right);
1474     }
1475
1476     Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
1477     {
1478         load32(left, dataTempRegister);
1479         return branch32(cond, dataTempRegister, right);
1480     }
1481
1482     Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1483     {
1484         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1485         compare32AndSetFlags(left, right8);
1486         return Jump(makeBranch(cond));
1487     }
1488
1489     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1490     {
1491         // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
1492         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1493         load8(left, addressTempRegister);
1494         return branch8(cond, addressTempRegister, right8);
1495     }
1496
1497     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1498     {
1499         // Use addressTempRegister in case the branch32 we call uses dataTempRegister.
1500         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1501         load8(left, addressTempRegister);
1502         return branch32(cond, addressTempRegister, right8);
1503     }
1504     
1505     Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
1506     {
1507         // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
1508         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1509         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1510         load8(Address(addressTempRegister), addressTempRegister);
1511         return branch32(cond, addressTempRegister, right8);
1512     }
1513     
1514     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1515     {
1516         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1517         m_assembler.tst(reg, mask);
1518         return Jump(makeBranch(cond));
1519     }
1520
1521     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1522     {
1523         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1524         test32(reg, mask);
1525         return Jump(makeBranch(cond));
1526     }
1527
1528     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1529     {
1530         // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister.
1531         load32(address, addressTempRegister);
1532         return branchTest32(cond, addressTempRegister, mask);
1533     }
1534
1535     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1536     {
1537         // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister.
1538         load32(address, addressTempRegister);
1539         return branchTest32(cond, addressTempRegister, mask);
1540     }
1541
1542     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1543     {
1544         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister.
1545         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1546         load8(address, addressTempRegister);
1547         return branchTest32(cond, addressTempRegister, mask8);
1548     }
1549
1550     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1551     {
1552         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister.
1553         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1554         load8(address, addressTempRegister);
1555         return branchTest32(cond, addressTempRegister, mask8);
1556     }
1557
1558     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1559     {
1560         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister.
1561         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1562         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1563         load8(Address(addressTempRegister), addressTempRegister);
1564         return branchTest32(cond, addressTempRegister, mask8);
1565     }
1566
1567     void jump(RegisterID target)
1568     {
1569         m_assembler.bx(target);
1570     }
1571
1572     // Address is a memory location containing the address to jump to
1573     void jump(Address address)
1574     {
1575         load32(address, dataTempRegister);
1576         m_assembler.bx(dataTempRegister);
1577     }
1578     
1579     void jump(AbsoluteAddress address)
1580     {
1581         move(TrustedImmPtr(address.m_ptr), dataTempRegister);
1582         load32(Address(dataTempRegister), dataTempRegister);
1583         m_assembler.bx(dataTempRegister);
1584     }
1585
1586
1587     // Arithmetic control flow operations:
1588     //
1589     // This set of conditional branch operations branch based
1590     // on the result of an arithmetic operation.  The operation
1591     // is performed as normal, storing the result.
1592     //
1593     // * jz operations branch if the result is zero.
1594     // * jo operations branch if the (signed) arithmetic
1595     //   operation caused an overflow to occur.
1596     
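    //
    // A minimal usage sketch (illustrative only; resultReg, op2Reg, and the
    // slow-path handling are placeholders, not part of this interface):
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, op2Reg, resultReg);
    //     // ... fast path continues here ...
    //     overflowed.link(&masm); // taken when the signed add overflowed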
1597     Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1598     {
1599         m_assembler.add_S(dest, op1, op2);
1600         return Jump(makeBranch(cond));
1601     }
1602
1603     Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1604     {
1605         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1606         if (armImm.isValid())
1607             m_assembler.add_S(dest, op1, armImm);
1608         else {
1609             move(imm, dataTempRegister);
1610             m_assembler.add_S(dest, op1, dataTempRegister);
1611         }
1612         return Jump(makeBranch(cond));
1613     }
1614
1615     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1616     {
1617         return branchAdd32(cond, dest, src, dest);
1618     }
1619
1620     Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
1621     {
1622         load32(src, dataTempRegister);
1623         return branchAdd32(cond, dest, dataTempRegister, dest);
1624     }
1625
1626     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1627     {
1628         return branchAdd32(cond, dest, imm, dest);
1629     }
1630
1631     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
1632     {
1633         // Move the address into addressTempRegister,
1634         // and load the current value into dataTempRegister.
1635         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1636         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1637
1638         // Do the add.
1639         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1640         if (armImm.isValid())
1641             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
1642         else {
1643             // If the operand does not fit into an immediate then load it temporarily
1644             // into addressTempRegister; since we're overwriting addressTempRegister
1645             // we'll need to reload it with the address afterwards.
1646             move(imm, addressTempRegister);
1647             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
1648             move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1649         }
1650
1651         // Store the result.
1652         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1653
1654         return Jump(makeBranch(cond));
1655     }
1656
1657     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1658     {
1659         m_assembler.smull(dest, dataTempRegister, src1, src2);
1660
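        // smull leaves the low 32 bits of the product in dest and the high 32
        // bits in dataTempRegister; the signed multiply overflowed a 32-bit
        // result iff the high word is not the sign-extension of the low word,
        // which the asr/compare below detects.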
1661         if (cond == Overflow) {
1662             m_assembler.asr(addressTempRegister, dest, 31);
1663             return branch32(NotEqual, addressTempRegister, dataTempRegister);
1664         }
1665
1666         return branchTest32(cond, dest);
1667     }
1668
1669     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1670     {
1671         return branchMul32(cond, src, dest, dest);
1672     }
1673
1674     Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
1675     {
1676         move(imm, dataTempRegister);
1677         return branchMul32(cond, dataTempRegister, src, dest);
1678     }
1679
1680     Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1681     {
1682         ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1683         m_assembler.sub_S(srcDest, zero, srcDest);
1684         return Jump(makeBranch(cond));
1685     }
1686
1687     Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1688     {
1689         m_assembler.orr_S(dest, dest, src);
1690         return Jump(makeBranch(cond));
1691     }
1692
1693     Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1694     {
1695         m_assembler.sub_S(dest, op1, op2);
1696         return Jump(makeBranch(cond));
1697     }
1698
1699     Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1700     {
1701         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1702         if (armImm.isValid())
1703             m_assembler.sub_S(dest, op1, armImm);
1704         else {
1705             move(imm, dataTempRegister);
1706             m_assembler.sub_S(dest, op1, dataTempRegister);
1707         }
1708         return Jump(makeBranch(cond));
1709     }
1710     
1711     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1712     {
1713         return branchSub32(cond, dest, src, dest);
1714     }
1715
1716     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1717     {
1718         return branchSub32(cond, dest, imm, dest);
1719     }
1720     
1721     void relativeTableJump(RegisterID index, int scale)
1722     {
1723         ASSERT(scale >= 0 && scale <= 31);
1724
1725         // dataTempRegister will point just after the jump if the index register contains zero.
1726         move(ARMRegisters::pc, dataTempRegister);
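        // The PC reads ahead of the mov above; adding 9 makes dataTempRegister
        // address the instruction just after the bx below (with the low bit
        // set to stay in Thumb state), matching the zero-index case described
        // above.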
1727         m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
1728
1729         ShiftTypeAndAmount shift(SRType_LSL, scale);
1730         m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
1731         jump(dataTempRegister);
1732     }
1733
1734     // Miscellaneous operations:
1735
1736     void breakpoint(uint8_t imm = 0)
1737     {
1738         m_assembler.bkpt(imm);
1739     }
1740
1741     ALWAYS_INLINE Call nearCall()
1742     {
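        // Emit a fixed-width load of a placeholder target followed by an
        // indirect call; linkCall() / repatchCall() fill in the real
        // destination later.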
1743         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1744         return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
1745     }
1746
1747     ALWAYS_INLINE Call nearTailCall()
1748     {
1749         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1750         return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
1751     }
1752
1753     ALWAYS_INLINE Call call()
1754     {
1755         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1756         return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
1757     }
1758
1759     ALWAYS_INLINE Call call(RegisterID target)
1760     {
1761         return Call(m_assembler.blx(target), Call::None);
1762     }
1763
1764     ALWAYS_INLINE Call call(Address address)
1765     {
1766         load32(address, dataTempRegister);
1767         return Call(m_assembler.blx(dataTempRegister), Call::None);
1768     }
1769
1770     ALWAYS_INLINE void ret()
1771     {
1772         m_assembler.bx(linkRegister);
1773     }
1774
1775     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1776     {
1777         m_assembler.cmp(left, right);
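        // IT-then-else block: the first mov executes when cond holds (dest = 1),
        // the second when it does not (dest = 0).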
1778         m_assembler.it(armV7Condition(cond), false);
1779         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1780         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1781     }
1782
1783     void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1784     {
1785         load32(left, dataTempRegister);
1786         compare32(cond, dataTempRegister, right, dest);
1787     }
1788
1789     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1790     {
1791         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1792         load8(left, addressTempRegister);
1793         compare32(cond, addressTempRegister, right8, dest);
1794     }
1795
1796     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1797     {
1798         compare32AndSetFlags(left, right);
1799         m_assembler.it(armV7Condition(cond), false);
1800         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1801         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1802     }
1803
1804     // FIXME:
1805     // The mask should be optional... perhaps the argument order should be
1806     // dest-src, operations always have a dest? ... possibly not true, considering
1807     // asm ops like test, or pseudo ops like pop().
1808     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1809     {
1810         load32(address, dataTempRegister);
1811         test32(dataTempRegister, mask);
1812         m_assembler.it(armV7Condition(cond), false);
1813         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1814         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1815     }
1816
1817     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1818     {
1819         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1820         load8(address, dataTempRegister);
1821         test32(dataTempRegister, mask8);
1822         m_assembler.it(armV7Condition(cond), false);
1823         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1824         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1825     }
1826
1827     ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
1828     {
1829         padBeforePatch();
1830         moveFixedWidthEncoding(imm, dst);
1831         return DataLabel32(this);
1832     }
1833
1834     ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
1835     {
1836         padBeforePatch();
1837         moveFixedWidthEncoding(TrustedImm32(imm), dst);
1838         return DataLabelPtr(this);
1839     }
1840
1841     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1842     {
1843         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1844         return branch32(cond, left, dataTempRegister);
1845     }
1846
1847     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1848     {
1849         load32(left, addressTempRegister);
1850         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1851         return branch32(cond, addressTempRegister, dataTempRegister);
1852     }
1853     
1854     ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1855     {
1856         load32(left, addressTempRegister);
1857         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1858         return branch32(cond, addressTempRegister, dataTempRegister);
1859     }
1860     
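    // The patchableBranch*() variants set m_makeJumpPatchable around the
    // underlying branch so the jump it emits uses a fixed-size encoding
    // (see makeBranch()/jump() below), keeping the site repatchable later.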
1861     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
1862     {
1863         m_makeJumpPatchable = true;
1864         Jump result = branch32(cond, left, TrustedImm32(right));
1865         m_makeJumpPatchable = false;
1866         return PatchableJump(result);
1867     }
1868     
1869     PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1870     {
1871         m_makeJumpPatchable = true;
1872         Jump result = branchTest32(cond, reg, mask);
1873         m_makeJumpPatchable = false;
1874         return PatchableJump(result);
1875     }
1876
1877     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
1878     {
1879         m_makeJumpPatchable = true;
1880         Jump result = branch32(cond, reg, imm);
1881         m_makeJumpPatchable = false;
1882         return PatchableJump(result);
1883     }
1884
1885     PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
1886     {
1887         m_makeJumpPatchable = true;
1888         Jump result = branch32(cond, left, imm);
1889         m_makeJumpPatchable = false;
1890         return PatchableJump(result);
1891     }
1892
1893     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1894     {
1895         m_makeJumpPatchable = true;
1896         Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
1897         m_makeJumpPatchable = false;
1898         return PatchableJump(result);
1899     }
1900
1901     PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1902     {
1903         m_makeJumpPatchable = true;
1904         Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
1905         m_makeJumpPatchable = false;
1906         return PatchableJump(result);
1907     }
1908
1909     PatchableJump patchableJump()
1910     {
1911         padBeforePatch();
1912         m_makeJumpPatchable = true;
1913         Jump result = jump();
1914         m_makeJumpPatchable = false;
1915         return PatchableJump(result);
1916     }
1917
1918     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1919     {
1920         DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1921         store32(dataTempRegister, address);
1922         return label;
1923     }
1924     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1925
1926
1927     ALWAYS_INLINE Call tailRecursiveCall()
1928     {
1929         // Like a normal call, but don't link.
1930         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1931         return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
1932     }
1933
1934     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1935     {
1936         oldJump.link(this);
1937         return tailRecursiveCall();
1938     }
1939
1940     
1941     static FunctionPtr readCallTarget(CodeLocationCall call)
1942     {
1943         return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
1944     }
1945     
1946     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
1947     static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
1948     
1949     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
1950     {
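        // The label returned by moveWithPatch() points just past the movw/movt
        // pair it emits; each is a 4-byte Thumb-2 instruction, so the patchable
        // sequence starts 8 bytes earlier.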
1951         const unsigned twoWordOpSize = 4;
1952         return label.labelAtOffset(-twoWordOpSize * 2);
1953     }
1954     
1955     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
1956     {
1957 #if OS(LINUX)
1958         ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
1959 #else
1960         UNUSED_PARAM(rd);
1961         ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
1962 #endif
1963     }
1964     
1965     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
1966     {
1967         UNREACHABLE_FOR_PLATFORM();
1968         return CodeLocationLabel();
1969     }
1970     
1971     static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
1972     {
1973         UNREACHABLE_FOR_PLATFORM();
1974         return CodeLocationLabel();
1975     }
1976     
1977     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
1978     {
1979         UNREACHABLE_FOR_PLATFORM();
1980     }
1981
1982     static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
1983     {
1984         UNREACHABLE_FOR_PLATFORM();
1985     }
1986
1987     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1988     {
1989         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1990     }
1991
1992     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1993     {
1994         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1995     }
1996
1997 #if ENABLE(MASM_PROBE)
1998     void probe(ProbeFunction, void* arg1, void* arg2);
1999 #endif // ENABLE(MASM_PROBE)
2000
2001 protected:
2002     ALWAYS_INLINE Jump jump()
2003     {
2004         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
2005         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2006         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
2007     }
2008
2009     ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
2010     {
2011         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
2012         m_assembler.it(cond, true, true);
2013         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2014         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
2015     }
2016     ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
2017     ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
2018     ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
2019
2020     ArmAddress setupArmAddress(BaseIndex address)
2021     {
2022         if (address.offset) {
2023             ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2024             if (imm.isValid())
2025                 m_assembler.add(addressTempRegister, address.base, imm);
2026             else {
2027                 move(TrustedImm32(address.offset), addressTempRegister);
2028                 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2029             }
2030
2031             return ArmAddress(addressTempRegister, address.index, address.scale);
2032         } else
2033             return ArmAddress(address.base, address.index, address.scale);
2034     }
2035
2036     ArmAddress setupArmAddress(Address address)
2037     {
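        // Offsets in [-255, 4095] can be encoded directly in the Thumb-2
        // load/store immediate forms; larger offsets are materialized into
        // addressTempRegister and applied as a register offset.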
2038         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2039             return ArmAddress(address.base, address.offset);
2040
2041         move(TrustedImm32(address.offset), addressTempRegister);
2042         return ArmAddress(address.base, addressTempRegister);
2043     }
2044
2045     ArmAddress setupArmAddress(ImplicitAddress address)
2046     {
2047         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2048             return ArmAddress(address.base, address.offset);
2049
2050         move(TrustedImm32(address.offset), addressTempRegister);
2051         return ArmAddress(address.base, addressTempRegister);
2052     }
2053
2054     RegisterID makeBaseIndexBase(BaseIndex address)
2055     {
2056         if (!address.offset)
2057             return address.base;
2058
2059         ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2060         if (imm.isValid())
2061             m_assembler.add(addressTempRegister, address.base, imm);
2062         else {
2063             move(TrustedImm32(address.offset), addressTempRegister);
2064             m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2065         }
2066
2067         return addressTempRegister;
2068     }
2069
2070     void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
2071     {
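        // Always emit the full movw/movt pair, even for values that fit a
        // shorter encoding, so callers such as moveWithPatch() and the
        // patchable jumps get a constant-size sequence that can be rewritten
        // in place.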
2072         uint32_t value = imm.m_value;
2073         m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
2074         m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
2075     }
2076
2077     ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
2078     {
2079         return static_cast<ARMv7Assembler::Condition>(cond);
2080     }
2081
2082     ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
2083     {
2084         return static_cast<ARMv7Assembler::Condition>(cond);
2085     }
2086
2087     ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
2088     {
2089         return static_cast<ARMv7Assembler::Condition>(cond);
2090     }
2091     
2092 private:
2093     friend class LinkBuffer;
2094
2095     static void linkCall(void* code, Call call, FunctionPtr function)
2096     {
2097         if (call.isFlagSet(Call::Tail))
2098             ARMv7Assembler::linkJump(code, call.m_label, function.value());
2099         else
2100             ARMv7Assembler::linkCall(code, call.m_label, function.value());
2101     }
2102
2103 #if ENABLE(MASM_PROBE)
2104     inline TrustedImm32 trustedImm32FromPtr(void* ptr)
2105     {
2106         return TrustedImm32(TrustedImmPtr(ptr));
2107     }
2108
2109     inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
2110     {
2111         return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2112     }
2113
2114     inline TrustedImm32 trustedImm32FromPtr(void (*function)())
2115     {
2116         return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2117     }
2118 #endif
2119
2120     bool m_makeJumpPatchable;
2121 };
2122
2123 } // namespace JSC
2124
2125 #endif // ENABLE(ASSEMBLER)
2126
2127 #endif // MacroAssemblerARMv7_h