JITMathIC was misusing maxJumpReplacementSize
[WebKit-https.git] Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
1 /*
2  * Copyright (C) 2009-2010, 2014-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2010 University of Szeged
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25  */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
38     static const RegisterID dataTempRegister = ARMRegisters::ip;
39     static const RegisterID addressTempRegister = ARMRegisters::r6;
40
41     static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
42     inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
43
44 public:
45     static const unsigned numGPRs = 16;
46     static const unsigned numFPRs = 16;
47     
48     MacroAssemblerARMv7()
49         : m_makeJumpPatchable(false)
50     {
51     }
52
53     typedef ARMv7Assembler::LinkRecord LinkRecord;
54     typedef ARMv7Assembler::JumpType JumpType;
55     typedef ARMv7Assembler::JumpLinkType JumpLinkType;
56     typedef ARMv7Assembler::Condition Condition;
57
58     static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
59     static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
60
61     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
62     {
63         return value >= -255 && value <= 255;
64     }
65
66     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
67     void* unlinkedCode() { return m_assembler.unlinkedCode(); }
68     static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
69     static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
70     static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
71     static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
72     static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); }
73
74     struct ArmAddress {
75         enum AddressType {
76             HasOffset,
77             HasIndex,
78         } type;
79         RegisterID base;
80         union {
81             int32_t offset;
82             struct {
83                 RegisterID index;
84                 Scale scale;
85             };
86         } u;
87         
88         explicit ArmAddress(RegisterID base, int32_t offset = 0)
89             : type(HasOffset)
90             , base(base)
91         {
92             u.offset = offset;
93         }
94         
95         explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
96             : type(HasIndex)
97             , base(base)
98         {
99             u.index = index;
100             u.scale = scale;
101         }
102     };
103     
104 public:
105     static const Scale ScalePtr = TimesFour;
106
107     enum RelationalCondition {
108         Equal = ARMv7Assembler::ConditionEQ,
109         NotEqual = ARMv7Assembler::ConditionNE,
110         Above = ARMv7Assembler::ConditionHI,
111         AboveOrEqual = ARMv7Assembler::ConditionHS,
112         Below = ARMv7Assembler::ConditionLO,
113         BelowOrEqual = ARMv7Assembler::ConditionLS,
114         GreaterThan = ARMv7Assembler::ConditionGT,
115         GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
116         LessThan = ARMv7Assembler::ConditionLT,
117         LessThanOrEqual = ARMv7Assembler::ConditionLE
118     };
119
120     enum ResultCondition {
121         Overflow = ARMv7Assembler::ConditionVS,
122         Signed = ARMv7Assembler::ConditionMI,
123         PositiveOrZero = ARMv7Assembler::ConditionPL,
124         Zero = ARMv7Assembler::ConditionEQ,
125         NonZero = ARMv7Assembler::ConditionNE
126     };
127
128     enum DoubleCondition {
129         // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
130         DoubleEqual = ARMv7Assembler::ConditionEQ,
131         DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
132         DoubleGreaterThan = ARMv7Assembler::ConditionGT,
133         DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
134         DoubleLessThan = ARMv7Assembler::ConditionLO,
135         DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
136         // If either operand is NaN, these conditions always evaluate to true.
137         DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
138         DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
139         DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
140         DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
141         DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
142         DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
143     };
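    // Illustrative note (not part of the original file): DoubleNotEqual and
    // DoubleEqualOrUnordered cannot be expressed with a single ARM condition code,
    // so branchDouble() below emits an extra unordered check for them, e.g.
    // (assuming a MacroAssemblerARMv7& jit and placeholder FP registers):
    //   Jump notEqual = jit.branchDouble(DoubleNotEqual, fpRegT0, fpRegT1);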
144
145     static const RegisterID stackPointerRegister = ARMRegisters::sp;
146     static const RegisterID framePointerRegister = ARMRegisters::fp;
147     static const RegisterID linkRegister = ARMRegisters::lr;
148
149     // Integer arithmetic operations:
150     //
151     // Operations are typically two operand - operation(source, srcDst)
152     // For many operations the source may be a TrustedImm32, and the srcDst operand
153     // may often be a memory location (explicitly described using an Address
154     // object).
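    // Illustrative usage (not part of the original file; assumes a
    // MacroAssemblerARMv7& jit and caller-chosen registers):
    //   jit.add32(TrustedImm32(1), ARMRegisters::r0);              // r0 += 1
    //   jit.add32(Address(ARMRegisters::r1, 8), ARMRegisters::r0); // r0 += *(int32_t*)(r1 + 8)
    //   jit.sub32(ARMRegisters::r2, ARMRegisters::r0);             // r0 -= r2
    // Immediates that cannot be encoded as a Thumb-2 immediate are first
    // materialized into dataTempRegister, as the helpers below show.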
155
156     void add32(RegisterID src, RegisterID dest)
157     {
158         m_assembler.add(dest, dest, src);
159     }
160
161     void add32(RegisterID left, RegisterID right, RegisterID dest)
162     {
163         m_assembler.add(dest, left, right);
164     }
165
166     void add32(TrustedImm32 imm, RegisterID dest)
167     {
168         add32(imm, dest, dest);
169     }
170     
171     void add32(AbsoluteAddress src, RegisterID dest)
172     {
173         load32(src.m_ptr, dataTempRegister);
174         add32(dataTempRegister, dest);
175     }
176
177     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
178     {
179         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
180
181         // For adds with the stack pointer as destination, the src is first moved to sp
182         // to avoid generating an unpredictable instruction.
183         if (dest == ARMRegisters::sp && src != dest) {
184             move(src, ARMRegisters::sp);
185             src = ARMRegisters::sp;
186         }
187
188         if (armImm.isValid())
189             m_assembler.add(dest, src, armImm);
190         else {
191             move(imm, dataTempRegister);
192             m_assembler.add(dest, src, dataTempRegister);
193         }
194     }
195
196     void add32(TrustedImm32 imm, Address address)
197     {
198         load32(address, dataTempRegister);
199
200         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
201         if (armImm.isValid())
202             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
203         else {
204             // Hrrrm, since dataTempRegister holds the data loaded,
205             // use addressTempRegister to hold the immediate.
206             move(imm, addressTempRegister);
207             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
208         }
209
210         store32(dataTempRegister, address);
211     }
212
213     void add32(Address src, RegisterID dest)
214     {
215         load32(src, dataTempRegister);
216         add32(dataTempRegister, dest);
217     }
218
219     void add32(TrustedImm32 imm, AbsoluteAddress address)
220     {
221         load32(address.m_ptr, dataTempRegister);
222
223         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
224         if (armImm.isValid())
225             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
226         else {
227             // Hrrrm, since dataTempRegister holds the data loaded,
228             // use addressTempRegister to hold the immediate.
229             move(imm, addressTempRegister);
230             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
231         }
232
233         store32(dataTempRegister, address.m_ptr);
234     }
235
236     void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
237     {
238         add32(imm, srcDest);
239     }
240     
241     void add64(TrustedImm32 imm, AbsoluteAddress address)
242     {
243         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
244
245         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
246         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
247         if (armImm.isValid())
248             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
249         else {
250             move(imm, addressTempRegister);
251             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
252             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
253         }
254         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
255
256         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
257         m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
258         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
259     }
260
261     void and32(RegisterID op1, RegisterID op2, RegisterID dest)
262     {
263         m_assembler.ARM_and(dest, op1, op2);
264     }
265
266     void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
267     {
268         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
269         if (armImm.isValid())
270             m_assembler.ARM_and(dest, src, armImm);
271         else {
272             move(imm, dataTempRegister);
273             m_assembler.ARM_and(dest, src, dataTempRegister);
274         }
275     }
276
277     void and32(RegisterID src, RegisterID dest)
278     {
279         and32(dest, src, dest);
280     }
281
282     void and32(TrustedImm32 imm, RegisterID dest)
283     {
284         and32(imm, dest, dest);
285     }
286
287     void and32(Address src, RegisterID dest)
288     {
289         load32(src, dataTempRegister);
290         and32(dataTempRegister, dest);
291     }
292
293     void countLeadingZeros32(RegisterID src, RegisterID dest)
294     {
295         m_assembler.clz(dest, src);
296     }
297
298     void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
299     {
300         // Clamp the shift to the range 0..31
301         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
302         ASSERT(armImm.isValid());
303         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
304
305         m_assembler.lsl(dest, src, dataTempRegister);
306     }
307
308     void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
309     {
310         m_assembler.lsl(dest, src, imm.m_value & 0x1f);
311     }
312
313     void lshift32(RegisterID shiftAmount, RegisterID dest)
314     {
315         lshift32(dest, shiftAmount, dest);
316     }
317
318     void lshift32(TrustedImm32 imm, RegisterID dest)
319     {
320         lshift32(dest, imm, dest);
321     }
322
323     void mul32(RegisterID src, RegisterID dest)
324     {
325         m_assembler.smull(dest, dataTempRegister, dest, src);
326     }
327
328     void mul32(RegisterID left, RegisterID right, RegisterID dest)
329     {
330         m_assembler.smull(dest, dataTempRegister, left, right);
331     }
332
333     void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
334     {
335         move(imm, dataTempRegister);
336         m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
337     }
338
339     void neg32(RegisterID srcDest)
340     {
341         m_assembler.neg(srcDest, srcDest);
342     }
343
344     void or32(RegisterID src, RegisterID dest)
345     {
346         m_assembler.orr(dest, dest, src);
347     }
348     
349     void or32(RegisterID src, AbsoluteAddress dest)
350     {
351         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
352         load32(addressTempRegister, dataTempRegister);
353         or32(src, dataTempRegister);
354         store32(dataTempRegister, addressTempRegister);
355     }
356
357     void or32(TrustedImm32 imm, AbsoluteAddress address)
358     {
359         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
360         if (armImm.isValid()) {
361             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
362             load32(addressTempRegister, dataTempRegister);
363             m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
364             store32(dataTempRegister, addressTempRegister);
365         } else {
366             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
367             load32(addressTempRegister, dataTempRegister);
368             move(imm, addressTempRegister);
369             m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
370             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
371             store32(dataTempRegister, addressTempRegister);
372         }
373     }
374
375     void or32(TrustedImm32 imm, Address address)
376     {
377         load32(address, dataTempRegister);
378         or32(imm, dataTempRegister, dataTempRegister);
379         store32(dataTempRegister, address);
380     }
381
382     void or32(TrustedImm32 imm, RegisterID dest)
383     {
384         or32(imm, dest, dest);
385     }
386
387     void or32(RegisterID op1, RegisterID op2, RegisterID dest)
388     {
389         m_assembler.orr(dest, op1, op2);
390     }
391
392     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
393     {
394         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
395         if (armImm.isValid())
396             m_assembler.orr(dest, src, armImm);
397         else {
398             ASSERT(src != dataTempRegister);
399             move(imm, dataTempRegister);
400             m_assembler.orr(dest, src, dataTempRegister);
401         }
402     }
403
404     void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
405     {
406         // Clamp the shift to the range 0..31
407         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
408         ASSERT(armImm.isValid());
409         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
410
411         m_assembler.asr(dest, src, dataTempRegister);
412     }
413
414     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
415     {
416         if (!imm.m_value)
417             move(src, dest);
418         else
419             m_assembler.asr(dest, src, imm.m_value & 0x1f);
420     }
421
422     void rshift32(RegisterID shiftAmount, RegisterID dest)
423     {
424         rshift32(dest, shiftAmount, dest);
425     }
426     
427     void rshift32(TrustedImm32 imm, RegisterID dest)
428     {
429         rshift32(dest, imm, dest);
430     }
431
432     void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
433     {
434         // Clamp the shift to the range 0..31
435         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
436         ASSERT(armImm.isValid());
437         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
438         
439         m_assembler.lsr(dest, src, dataTempRegister);
440     }
441     
442     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
443     {
444         if (!imm.m_value)
445             move(src, dest);
446         else
447             m_assembler.lsr(dest, src, imm.m_value & 0x1f);
448     }
449
450     void urshift32(RegisterID shiftAmount, RegisterID dest)
451     {
452         urshift32(dest, shiftAmount, dest);
453     }
454     
455     void urshift32(TrustedImm32 imm, RegisterID dest)
456     {
457         urshift32(dest, imm, dest);
458     }
459
460     void sub32(RegisterID src, RegisterID dest)
461     {
462         m_assembler.sub(dest, dest, src);
463     }
464
465     void sub32(TrustedImm32 imm, RegisterID dest)
466     {
467         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
468         if (armImm.isValid())
469             m_assembler.sub(dest, dest, armImm);
470         else {
471             move(imm, dataTempRegister);
472             m_assembler.sub(dest, dest, dataTempRegister);
473         }
474     }
475
476     void sub32(TrustedImm32 imm, Address address)
477     {
478         load32(address, dataTempRegister);
479
480         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
481         if (armImm.isValid())
482             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
483         else {
484             // Hrrrm, since dataTempRegister holds the data loaded,
485             // use addressTempRegister to hold the immediate.
486             move(imm, addressTempRegister);
487             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
488         }
489
490         store32(dataTempRegister, address);
491     }
492
493     void sub32(Address src, RegisterID dest)
494     {
495         load32(src, dataTempRegister);
496         sub32(dataTempRegister, dest);
497     }
498
499     void sub32(TrustedImm32 imm, AbsoluteAddress address)
500     {
501         load32(address.m_ptr, dataTempRegister);
502
503         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
504         if (armImm.isValid())
505             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
506         else {
507             // Hrrrm, since dataTempRegister holds the data loaded,
508             // use addressTempRegister to hold the immediate.
509             move(imm, addressTempRegister);
510             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
511         }
512
513         store32(dataTempRegister, address.m_ptr);
514     }
515
516     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
517     {
518         m_assembler.eor(dest, op1, op2);
519     }
520
521     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
522     {
523         if (imm.m_value == -1) {
524             m_assembler.mvn(dest, src);
525             return;
526         }
527
528         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
529         if (armImm.isValid())
530             m_assembler.eor(dest, src, armImm);
531         else {
532             move(imm, dataTempRegister);
533             m_assembler.eor(dest, src, dataTempRegister);
534         }
535     }
536
537     void xor32(RegisterID src, RegisterID dest)
538     {
539         xor32(dest, src, dest);
540     }
541
542     void xor32(TrustedImm32 imm, RegisterID dest)
543     {
544         if (imm.m_value == -1)
545             m_assembler.mvn(dest, dest);
546         else
547             xor32(imm, dest, dest);
548     }
549     
550
551     // Memory access operations:
552     //
553     // Loads are of the form load(address, destination) and stores of the form
554     // store(source, address).  The source for a store may be a TrustedImm32.  Address
555     // operand objects to loads and stores will be implicitly constructed if a
556     // register is passed.
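    // Illustrative usage (not part of the original file; placeholder registers,
    // assuming a MacroAssemblerARMv7& jit):
    //   jit.load32(Address(ARMRegisters::r1, 4), ARMRegisters::r0); // r0 = *(int32_t*)(r1 + 4)
    //   jit.store32(ARMRegisters::r0, BaseIndex(ARMRegisters::r2, ARMRegisters::r3, TimesFour));
    // Offsets that do not fit the immediate forms are folded into
    // addressTempRegister by setupArmAddress() before the ldr/str is emitted.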
557
558 private:
559     void load32(ArmAddress address, RegisterID dest)
560     {
561         if (address.type == ArmAddress::HasIndex)
562             m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
563         else if (address.u.offset >= 0) {
564             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
565             ASSERT(armImm.isValid());
566             m_assembler.ldr(dest, address.base, armImm);
567         } else {
568             ASSERT(address.u.offset >= -255);
569             m_assembler.ldr(dest, address.base, address.u.offset, true, false);
570         }
571     }
572
573     void load16(ArmAddress address, RegisterID dest)
574     {
575         if (address.type == ArmAddress::HasIndex)
576             m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
577         else if (address.u.offset >= 0) {
578             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
579             ASSERT(armImm.isValid());
580             m_assembler.ldrh(dest, address.base, armImm);
581         } else {
582             ASSERT(address.u.offset >= -255);
583             m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
584         }
585     }
586     
587     void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
588     {
589         ASSERT(address.type == ArmAddress::HasIndex);
590         m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
591     }
592
593     void load8(ArmAddress address, RegisterID dest)
594     {
595         if (address.type == ArmAddress::HasIndex)
596             m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
597         else if (address.u.offset >= 0) {
598             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
599             ASSERT(armImm.isValid());
600             m_assembler.ldrb(dest, address.base, armImm);
601         } else {
602             ASSERT(address.u.offset >= -255);
603             m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
604         }
605     }
606     
607     void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
608     {
609         ASSERT(address.type == ArmAddress::HasIndex);
610         m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
611     }
612
613 protected:
614     void store32(RegisterID src, ArmAddress address)
615     {
616         if (address.type == ArmAddress::HasIndex)
617             m_assembler.str(src, address.base, address.u.index, address.u.scale);
618         else if (address.u.offset >= 0) {
619             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
620             ASSERT(armImm.isValid());
621             m_assembler.str(src, address.base, armImm);
622         } else {
623             ASSERT(address.u.offset >= -255);
624             m_assembler.str(src, address.base, address.u.offset, true, false);
625         }
626     }
627
628 private:
629     void store8(RegisterID src, ArmAddress address)
630     {
631         if (address.type == ArmAddress::HasIndex)
632             m_assembler.strb(src, address.base, address.u.index, address.u.scale);
633         else if (address.u.offset >= 0) {
634             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
635             ASSERT(armImm.isValid());
636             m_assembler.strb(src, address.base, armImm);
637         } else {
638             ASSERT(address.u.offset >= -255);
639             m_assembler.strb(src, address.base, address.u.offset, true, false);
640         }
641     }
642     
643     void store16(RegisterID src, ArmAddress address)
644     {
645         if (address.type == ArmAddress::HasIndex)
646             m_assembler.strh(src, address.base, address.u.index, address.u.scale);
647         else if (address.u.offset >= 0) {
648             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
649             ASSERT(armImm.isValid());
650             m_assembler.strh(src, address.base, armImm);
651         } else {
652             ASSERT(address.u.offset >= -255);
653             m_assembler.strh(src, address.base, address.u.offset, true, false);
654         }
655     }
656
657 public:
658     void load32(ImplicitAddress address, RegisterID dest)
659     {
660         load32(setupArmAddress(address), dest);
661     }
662
663     void load32(BaseIndex address, RegisterID dest)
664     {
665         load32(setupArmAddress(address), dest);
666     }
667
668     void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
669     {
670         load32(setupArmAddress(address), dest);
671     }
672
673     void load16Unaligned(BaseIndex address, RegisterID dest)
674     {
675         load16(setupArmAddress(address), dest);
676     }
677
678     void load32(const void* address, RegisterID dest)
679     {
680         move(TrustedImmPtr(address), addressTempRegister);
681         m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
682     }
683     
684     void abortWithReason(AbortReason reason)
685     {
686         move(TrustedImm32(reason), dataTempRegister);
687         breakpoint();
688     }
689
690     void abortWithReason(AbortReason reason, intptr_t misc)
691     {
692         move(TrustedImm32(misc), addressTempRegister);
693         abortWithReason(reason);
694     }
695
696     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
697     {
698         ConvertibleLoadLabel result(this);
699         ASSERT(address.offset >= 0 && address.offset <= 255);
700         m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
701         return result;
702     }
703
704     void load8(ImplicitAddress address, RegisterID dest)
705     {
706         load8(setupArmAddress(address), dest);
707     }
708
709     void load8SignedExtendTo32(ImplicitAddress, RegisterID)
710     {
711         UNREACHABLE_FOR_PLATFORM();
712     }
713
714     void load8(BaseIndex address, RegisterID dest)
715     {
716         load8(setupArmAddress(address), dest);
717     }
718     
719     void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
720     {
721         load8SignedExtendTo32(setupArmAddress(address), dest);
722     }
723
724     void load8(const void* address, RegisterID dest)
725     {
726         move(TrustedImmPtr(address), dest);
727         load8(dest, dest);
728     }
729
730     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
731     {
732         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
733         load32(ArmAddress(address.base, dataTempRegister), dest);
734         return label;
735     }
736     
737     DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
738     {
739         padBeforePatch();
740
741         RegisterID base = address.base;
742         
743         DataLabelCompact label(this);
744         ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
745
746         m_assembler.ldr(dest, base, address.offset, true, false);
747         return label;
748     }
749
750     void load16(BaseIndex address, RegisterID dest)
751     {
752         m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
753     }
754     
755     void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
756     {
757         load16SignedExtendTo32(setupArmAddress(address), dest);
758     }
759     
760     void load16(ImplicitAddress address, RegisterID dest)
761     {
762         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
763         if (armImm.isValid())
764             m_assembler.ldrh(dest, address.base, armImm);
765         else {
766             move(TrustedImm32(address.offset), dataTempRegister);
767             m_assembler.ldrh(dest, address.base, dataTempRegister);
768         }
769     }
770     
771     void load16SignedExtendTo32(ImplicitAddress, RegisterID)
772     {
773         UNREACHABLE_FOR_PLATFORM();
774     }
775
776     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
777     {
778         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
779         store32(src, ArmAddress(address.base, dataTempRegister));
780         return label;
781     }
782
783     void store32(RegisterID src, ImplicitAddress address)
784     {
785         store32(src, setupArmAddress(address));
786     }
787
788     void store32(RegisterID src, BaseIndex address)
789     {
790         store32(src, setupArmAddress(address));
791     }
792
793     void store32(TrustedImm32 imm, ImplicitAddress address)
794     {
795         move(imm, dataTempRegister);
796         store32(dataTempRegister, setupArmAddress(address));
797     }
798
799     void store32(TrustedImm32 imm, BaseIndex address)
800     {
801         move(imm, dataTempRegister);
802         store32(dataTempRegister, setupArmAddress(address));
803     }
804
805     void store32(RegisterID src, const void* address)
806     {
807         move(TrustedImmPtr(address), addressTempRegister);
808         m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
809     }
810
811     void store32(TrustedImm32 imm, const void* address)
812     {
813         move(imm, dataTempRegister);
814         store32(dataTempRegister, address);
815     }
816
817     void store8(RegisterID src, Address address)
818     {
819         store8(src, setupArmAddress(address));
820     }
821     
822     void store8(RegisterID src, BaseIndex address)
823     {
824         store8(src, setupArmAddress(address));
825     }
826     
827     void store8(RegisterID src, void* address)
828     {
829         move(TrustedImmPtr(address), addressTempRegister);
830         store8(src, ArmAddress(addressTempRegister, 0));
831     }
832     
833     void store8(TrustedImm32 imm, void* address)
834     {
835         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
836         move(imm8, dataTempRegister);
837         store8(dataTempRegister, address);
838     }
839     
840     void store8(TrustedImm32 imm, Address address)
841     {
842         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
843         move(imm8, dataTempRegister);
844         store8(dataTempRegister, address);
845     }
846     
847     void store16(RegisterID src, BaseIndex address)
848     {
849         store16(src, setupArmAddress(address));
850     }
851
852     // Possibly clobbers src, but not on this architecture.
853     void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
854     {
855         m_assembler.vmov(dest1, dest2, src);
856     }
857     
858     void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
859     {
860         UNUSED_PARAM(scratch);
861         m_assembler.vmov(dest, src1, src2);
862     }
863
864     static bool shouldBlindForSpecificArch(uint32_t value)
865     {
866         ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
867
868         // Couldn't be encoded as an immediate, so assume it's untrusted.
869         if (!immediate.isValid())
870             return true;
871         
872         // If we can encode the immediate, we have less than 16 attacker
873         // controlled bits.
874         if (immediate.isEncodedImm())
875             return false;
876
877         // Don't let any more than 12 bits of an instruction word
878         // be controlled by an attacker.
879         return !immediate.isUInt12();
880     }
881
882     // Floating-point operations:
883
884     static bool supportsFloatingPoint() { return true; }
885     static bool supportsFloatingPointTruncate() { return true; }
886     static bool supportsFloatingPointSqrt() { return true; }
887     static bool supportsFloatingPointAbs() { return true; }
888     static bool supportsFloatingPointRounding() { return false; }
889
890     void loadDouble(ImplicitAddress address, FPRegisterID dest)
891     {
892         RegisterID base = address.base;
893         int32_t offset = address.offset;
894
895         // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
896         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
897             add32(TrustedImm32(offset), base, addressTempRegister);
898             base = addressTempRegister;
899             offset = 0;
900         }
901         
902         m_assembler.vldr(dest, base, offset);
903     }
904
905     void loadFloat(ImplicitAddress address, FPRegisterID dest)
906     {
907         RegisterID base = address.base;
908         int32_t offset = address.offset;
909
910         // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
911         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
912             add32(TrustedImm32(offset), base, addressTempRegister);
913             base = addressTempRegister;
914             offset = 0;
915         }
916         
917         m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
918     }
919
920     void loadDouble(BaseIndex address, FPRegisterID dest)
921     {
922         move(address.index, addressTempRegister);
923         lshift32(TrustedImm32(address.scale), addressTempRegister);
924         add32(address.base, addressTempRegister);
925         loadDouble(Address(addressTempRegister, address.offset), dest);
926     }
927     
928     void loadFloat(BaseIndex address, FPRegisterID dest)
929     {
930         move(address.index, addressTempRegister);
931         lshift32(TrustedImm32(address.scale), addressTempRegister);
932         add32(address.base, addressTempRegister);
933         loadFloat(Address(addressTempRegister, address.offset), dest);
934     }
935
936     void moveDouble(FPRegisterID src, FPRegisterID dest)
937     {
938         if (src != dest)
939             m_assembler.vmov(dest, src);
940     }
941
942     void moveZeroToDouble(FPRegisterID reg)
943     {
944         static double zeroConstant = 0.;
945         loadDouble(TrustedImmPtr(&zeroConstant), reg);
946     }
947
948     void loadDouble(TrustedImmPtr address, FPRegisterID dest)
949     {
950         move(address, addressTempRegister);
951         m_assembler.vldr(dest, addressTempRegister, 0);
952     }
953
954     void storeDouble(FPRegisterID src, ImplicitAddress address)
955     {
956         RegisterID base = address.base;
957         int32_t offset = address.offset;
958
959         // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
960         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
961             add32(TrustedImm32(offset), base, addressTempRegister);
962             base = addressTempRegister;
963             offset = 0;
964         }
965         
966         m_assembler.vstr(src, base, offset);
967     }
968
969     void storeFloat(FPRegisterID src, ImplicitAddress address)
970     {
971         RegisterID base = address.base;
972         int32_t offset = address.offset;
973
974         // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
975         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
976             add32(TrustedImm32(offset), base, addressTempRegister);
977             base = addressTempRegister;
978             offset = 0;
979         }
980         
981         m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
982     }
983
984     void storeDouble(FPRegisterID src, TrustedImmPtr address)
985     {
986         move(address, addressTempRegister);
987         storeDouble(src, addressTempRegister);
988     }
989
990     void storeDouble(FPRegisterID src, BaseIndex address)
991     {
992         move(address.index, addressTempRegister);
993         lshift32(TrustedImm32(address.scale), addressTempRegister);
994         add32(address.base, addressTempRegister);
995         storeDouble(src, Address(addressTempRegister, address.offset));
996     }
997     
998     void storeFloat(FPRegisterID src, BaseIndex address)
999     {
1000         move(address.index, addressTempRegister);
1001         lshift32(TrustedImm32(address.scale), addressTempRegister);
1002         add32(address.base, addressTempRegister);
1003         storeFloat(src, Address(addressTempRegister, address.offset));
1004     }
1005     
1006     void addDouble(FPRegisterID src, FPRegisterID dest)
1007     {
1008         m_assembler.vadd(dest, dest, src);
1009     }
1010
1011     void addDouble(Address src, FPRegisterID dest)
1012     {
1013         loadDouble(src, fpTempRegister);
1014         addDouble(fpTempRegister, dest);
1015     }
1016
1017     void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1018     {
1019         m_assembler.vadd(dest, op1, op2);
1020     }
1021
1022     void addDouble(AbsoluteAddress address, FPRegisterID dest)
1023     {
1024         loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
1025         m_assembler.vadd(dest, dest, fpTempRegister);
1026     }
1027
1028     void divDouble(FPRegisterID src, FPRegisterID dest)
1029     {
1030         m_assembler.vdiv(dest, dest, src);
1031     }
1032
1033     void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1034     {
1035         m_assembler.vdiv(dest, op1, op2);
1036     }
1037
1038     void subDouble(FPRegisterID src, FPRegisterID dest)
1039     {
1040         m_assembler.vsub(dest, dest, src);
1041     }
1042
1043     void subDouble(Address src, FPRegisterID dest)
1044     {
1045         loadDouble(src, fpTempRegister);
1046         subDouble(fpTempRegister, dest);
1047     }
1048
1049     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1050     {
1051         m_assembler.vsub(dest, op1, op2);
1052     }
1053
1054     void mulDouble(FPRegisterID src, FPRegisterID dest)
1055     {
1056         m_assembler.vmul(dest, dest, src);
1057     }
1058
1059     void mulDouble(Address src, FPRegisterID dest)
1060     {
1061         loadDouble(src, fpTempRegister);
1062         mulDouble(fpTempRegister, dest);
1063     }
1064
1065     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1066     {
1067         m_assembler.vmul(dest, op1, op2);
1068     }
1069
1070     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1071     {
1072         m_assembler.vsqrt(dest, src);
1073     }
1074     
1075     void absDouble(FPRegisterID src, FPRegisterID dest)
1076     {
1077         m_assembler.vabs(dest, src);
1078     }
1079
1080     void negateDouble(FPRegisterID src, FPRegisterID dest)
1081     {
1082         m_assembler.vneg(dest, src);
1083     }
1084
1085     NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
1086     {
1087         ASSERT(!supportsFloatingPointRounding());
1088         CRASH();
1089     }
1090
1091     NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
1092     {
1093         ASSERT(!supportsFloatingPointRounding());
1094         CRASH();
1095     }
1096
1097     NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
1098     {
1099         ASSERT(!supportsFloatingPointRounding());
1100         CRASH();
1101     }
1102
1103     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
1104     {
1105         m_assembler.vmov(fpTempRegister, src, src);
1106         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1107     }
1108
1109     void convertInt32ToDouble(Address address, FPRegisterID dest)
1110     {
1111         // FIXME: Load directly into the FPR!
1112         load32(address, dataTempRegister);
1113         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1114         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1115     }
1116
1117     void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1118     {
1119         // FIXME: Load directly into the FPR!
1120         load32(address.m_ptr, dataTempRegister);
1121         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1122         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1123     }
1124     
1125     void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
1126     {
1127         m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
1128     }
1129     
1130     void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
1131     {
1132         m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
1133     }
1134
1135     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1136     {
1137         m_assembler.vcmp(left, right);
1138         m_assembler.vmrs();
1139
1140         if (cond == DoubleNotEqual) {
1141             // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
1142             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1143             Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1144             unordered.link(this);
1145             return result;
1146         }
1147         if (cond == DoubleEqualOrUnordered) {
1148             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1149             Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1150             unordered.link(this);
1151             // We get here if either unordered or equal.
1152             Jump result = jump();
1153             notEqual.link(this);
1154             return result;
1155         }
1156         return makeBranch(cond);
1157     }
1158
1159     enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1160     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1161     {
1162         // Convert into dest.
1163         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1164         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1165
1166         // Calculate 2x dest.  If the value potentially underflowed, it will have been
1167         // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
1168         // overflow the result will be equal to -2.
1169         Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
1170         Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
1171
1172         // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
1173         underflow.link(this);
1174         if (branchType == BranchIfTruncateSuccessful)
1175             return noOverflow;
1176
1177         // We'll reach the current point in the code on failure, so plant a
1178         // jump here & link the success case.
1179         Jump failure = jump();
1180         noOverflow.link(this);
1181         return failure;
1182     }
1183
1184     // Result is undefined if the value is outside of the integer range.
1185     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1186     {
1187         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1188         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1189     }
1190
1191     void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1192     {
1193         m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
1194         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1195     }
1196     
1197     // Converts 'src' to an integer, and places the result in 'dest'.
1198     // If the result is not representable as a 32 bit value, branch.
1199     // May also branch for some values that are representable in 32 bits
1200     // (specifically, in this case, 0).
1201     void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
1202     {
1203         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1204         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1205
1206         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1207         m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
1208         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1209
1210         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
1211         if (negZeroCheck)
1212             failureCases.append(branchTest32(Zero, dest));
1213     }
1214
1215     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1216     {
1217         m_assembler.vcmpz(reg);
1218         m_assembler.vmrs();
1219         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1220         Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1221         unordered.link(this);
1222         return result;
1223     }
1224
1225     Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1226     {
1227         m_assembler.vcmpz(reg);
1228         m_assembler.vmrs();
1229         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1230         Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1231         unordered.link(this);
1232         // We get here if either unordered or equal.
1233         Jump result = jump();
1234         notEqual.link(this);
1235         return result;
1236     }
1237
1238     // Stack manipulation operations:
1239     //
1240     // The ABI is assumed to provide a stack abstraction to memory,
1241     // containing machine word sized units of data.  Push and pop
1242     // operations add and remove a single register sized unit of data
1243     // to or from the stack.  Peek and poke operations read or write
1244     // values on the stack, without moving the current stack position.
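    // Illustrative usage (not part of the original file; assuming a
    // MacroAssemblerARMv7& jit):
    //   jit.push(ARMRegisters::r4);                        // *--sp = r4
    //   jit.pushPair(ARMRegisters::r5, ARMRegisters::r6);  // pushes both as a register list
    //   jit.popPair(ARMRegisters::r5, ARMRegisters::r6);
    //   jit.pop(ARMRegisters::r4);                         // r4 = *sp++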
1245     
1246     void pop(RegisterID dest)
1247     {
1248         m_assembler.pop(dest);
1249     }
1250
1251     void push(RegisterID src)
1252     {
1253         m_assembler.push(src);
1254     }
1255
1256     void push(Address address)
1257     {
1258         load32(address, dataTempRegister);
1259         push(dataTempRegister);
1260     }
1261
1262     void push(TrustedImm32 imm)
1263     {
1264         move(imm, dataTempRegister);
1265         push(dataTempRegister);
1266     }
1267
1268     void popPair(RegisterID dest1, RegisterID dest2)
1269     {
1270         m_assembler.pop(1 << dest1 | 1 << dest2);
1271     }
1272     
1273     void pushPair(RegisterID src1, RegisterID src2)
1274     {
1275         m_assembler.push(1 << src1 | 1 << src2);
1276     }
1277     
1278     // Register move operations:
1279     //
1280     // Move values in registers.
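    // Illustrative usage (not part of the original file):
    //   jit.move(TrustedImm32(0x12345678), ARMRegisters::r0); // materialized via movw/movt
    //   jit.move(ARMRegisters::r0, ARMRegisters::r1);         // r1 = r0
    //   jit.swap(ARMRegisters::r0, ARMRegisters::r1);         // goes through dataTempRegister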
1281
1282     void move(TrustedImm32 imm, RegisterID dest)
1283     {
1284         uint32_t value = imm.m_value;
1285
1286         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
1287
1288         if (armImm.isValid())
1289             m_assembler.mov(dest, armImm);
1290         else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
1291             m_assembler.mvn(dest, armImm);
1292         else {
1293             m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
1294             if (value & 0xffff0000)
1295                 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
1296         }
1297     }
1298
1299     void move(RegisterID src, RegisterID dest)
1300     {
1301         if (src != dest)
1302             m_assembler.mov(dest, src);
1303     }
1304
1305     void move(TrustedImmPtr imm, RegisterID dest)
1306     {
1307         move(TrustedImm32(imm), dest);
1308     }
1309
1310     void swap(RegisterID reg1, RegisterID reg2)
1311     {
1312         move(reg1, dataTempRegister);
1313         move(reg2, reg1);
1314         move(dataTempRegister, reg2);
1315     }
1316
1317     void signExtend32ToPtr(RegisterID src, RegisterID dest)
1318     {
1319         move(src, dest);
1320     }
1321
1322     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1323     {
1324         move(src, dest);
1325     }
1326
1327     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
1328     static RelationalCondition invert(RelationalCondition cond)
1329     {
1330         return static_cast<RelationalCondition>(cond ^ 1);
1331     }
1332
1333     void nop()
1334     {
1335         m_assembler.nop();
1336     }
1337     
1338     void memoryFence()
1339     {
1340         m_assembler.dmbSY();
1341     }
1342     
1343     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
1344     {
1345         ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
1346     }
1347     
1348     static ptrdiff_t maxJumpReplacementSize()
1349     {
1350         return ARMv7Assembler::maxJumpReplacementSize();
1351     }
1352
1353     static ptrdiff_t patchableJumpSize()
1354     {
1355         return ARMv7Assembler::patchableJumpSize();
1356     }
1357
1358     // Forwards / external control flow operations:
1359     //
1360     // This set of jump and conditional branch operations returns a Jump
1361     // object which may be linked at a later point, allowing forward jumps,
1362     // or jumps that will require external linkage (after the code has been
1363     // relocated).
1364     //
1365     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1366     // respectively; for unsigned comparisons the names b, a, be, and ae are
1367     // used (representing the names 'below' and 'above').
1368     //
1369     // Operands to the comparison are provided in the expected order, e.g.
1370     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1371     // treated as a signed 32-bit value, is less than or equal to 5.
1372     //
1373     // jz and jnz test whether the first operand is equal to zero, and take
1374     // an optional second operand of a mask under which to perform the test.
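    // Illustrative usage (not part of the original file; the surrounding code is a
    // placeholder, assuming a MacroAssemblerARMv7& jit):
    //   Jump slowPath = jit.branch32(GreaterThan, ARMRegisters::r0, TrustedImm32(5));
    //   /* ... fast path ... */
    //   slowPath.link(&jit);  // the branch is bound to this point in the code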
1375 private:
1376
1377     // Should we be using TEQ for equal/not-equal?
1378     void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
1379     {
1380         int32_t imm = right.m_value;
1381         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1382         if (armImm.isValid())
1383             m_assembler.cmp(left, armImm);
1384         else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
1385             m_assembler.cmn(left, armImm);
1386         else {
1387             move(TrustedImm32(imm), dataTempRegister);
1388             m_assembler.cmp(left, dataTempRegister);
1389         }
1390     }
1391
1392 public:
1393     void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1394     {
1395         int32_t imm = mask.m_value;
1396
1397         if (imm == -1)
1398             m_assembler.tst(reg, reg);
1399         else {
1400             ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1401             if (armImm.isValid()) {
1402                 if (reg == ARMRegisters::sp) {
1403                     move(reg, addressTempRegister);
1404                     m_assembler.tst(addressTempRegister, armImm);
1405                 } else
1406                     m_assembler.tst(reg, armImm);
1407             } else {
1408                 move(mask, dataTempRegister);
1409                 if (reg == ARMRegisters::sp) {
1410                     move(reg, addressTempRegister);
1411                     m_assembler.tst(addressTempRegister, dataTempRegister);
1412                 } else
1413                     m_assembler.tst(reg, dataTempRegister);
1414             }
1415         }
1416     }
1417     
1418     Jump branch(ResultCondition cond)
1419     {
1420         return Jump(makeBranch(cond));
1421     }
1422
1423     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1424     {
1425         m_assembler.cmp(left, right);
1426         return Jump(makeBranch(cond));
1427     }
1428
1429     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1430     {
1431         compare32AndSetFlags(left, right);
1432         return Jump(makeBranch(cond));
1433     }
1434
1435     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1436     {
1437         load32(right, dataTempRegister);
1438         return branch32(cond, left, dataTempRegister);
1439     }
1440
1441     Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1442     {
1443         load32(left, dataTempRegister);
1444         return branch32(cond, dataTempRegister, right);
1445     }
1446
1447     Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1448     {
1449         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1450         load32(left, addressTempRegister);
1451         return branch32(cond, addressTempRegister, right);
1452     }
1453
1454     Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1455     {
1456         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1457         load32(left, addressTempRegister);
1458         return branch32(cond, addressTempRegister, right);
1459     }
1460
1461     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1462     {
1463         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1464         load32WithUnalignedHalfWords(left, addressTempRegister);
1465         return branch32(cond, addressTempRegister, right);
1466     }
1467
1468     Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1469     {
1470         load32(left.m_ptr, dataTempRegister);
1471         return branch32(cond, dataTempRegister, right);
1472     }
1473
1474     Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1475     {
1476         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1477         load32(left.m_ptr, addressTempRegister);
1478         return branch32(cond, addressTempRegister, right);
1479     }
1480
1481     Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
1482     {
1483         load32(left, dataTempRegister);
1484         return branch32(cond, dataTempRegister, right);
1485     }
1486
1487     Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1488     {
1489         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1490         compare32AndSetFlags(left, right8);
1491         return Jump(makeBranch(cond));
1492     }
1493
1494     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1495     {
1496         // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
1497         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1498         load8(left, addressTempRegister);
1499         return branch8(cond, addressTempRegister, right8);
1500     }
1501
1502     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1503     {
1504         // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1505         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1506         load8(left, addressTempRegister);
1507         return branch32(cond, addressTempRegister, right8);
1508     }
1509     
1510     Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
1511     {
1512         // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
1513         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1514         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1515         load8(Address(addressTempRegister), addressTempRegister);
1516         return branch32(cond, addressTempRegister, right8);
1517     }
1518     
1519     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1520     {
1521         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1522         m_assembler.tst(reg, mask);
1523         return Jump(makeBranch(cond));
1524     }
1525
1526     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1527     {
1528         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1529         test32(reg, mask);
1530         return Jump(makeBranch(cond));
1531     }
1532
1533     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1534     {
1535         // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1536         load32(address, addressTempRegister);
1537         return branchTest32(cond, addressTempRegister, mask);
1538     }
1539
1540     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1541     {
1542         // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1543         load32(address, addressTempRegister);
1544         return branchTest32(cond, addressTempRegister, mask);
1545     }
1546
1547     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1548     {
1549         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1550         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1551         load8(address, addressTempRegister);
1552         return branchTest32(cond, addressTempRegister, mask8);
1553     }
1554
1555     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1556     {
1557         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1558         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1559         load8(address, addressTempRegister);
1560         return branchTest32(cond, addressTempRegister, mask8);
1561     }
1562
1563     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1564     {
1565         // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1566         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1567         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1568         load8(Address(addressTempRegister), addressTempRegister);
1569         return branchTest32(cond, addressTempRegister, mask8);
1570     }
1571
1572     void jump(RegisterID target)
1573     {
1574         m_assembler.bx(target);
1575     }
1576
1577     // Address is a memory location containing the address to jump to
1578     void jump(Address address)
1579     {
1580         load32(address, dataTempRegister);
1581         m_assembler.bx(dataTempRegister);
1582     }
1583     
1584     void jump(AbsoluteAddress address)
1585     {
1586         move(TrustedImmPtr(address.m_ptr), dataTempRegister);
1587         load32(Address(dataTempRegister), dataTempRegister);
1588         m_assembler.bx(dataTempRegister);
1589     }
1590
1591
1592     // Arithmetic control flow operations:
1593     //
1594     // This set of conditional branch operations branch based
1595     // on the result of an arithmetic operation.  The operation
1596     // is performed as normal, storing the result.
1597     //
1598     // * jz operations branch if the result is zero.
1599     // * jo operations branch if the (signed) arithmetic
1600     //   operation caused an overflow to occur.
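    //
    // A minimal usage sketch (illustrative only; both overloads and the Overflow/Zero
    // conditions appear elsewhere in this file):
    //     Jump didOverflow = branchAdd32(Overflow, src, dest); // dest += src; branch on signed overflow
    //     Jump wentZero = branchSub32(Zero, imm, dest);        // dest -= imm; branch if the result is zero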
1601     
1602     Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1603     {
1604         m_assembler.add_S(dest, op1, op2);
1605         return Jump(makeBranch(cond));
1606     }
1607
1608     Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1609     {
1610         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1611         if (armImm.isValid())
1612             m_assembler.add_S(dest, op1, armImm);
1613         else {
1614             move(imm, dataTempRegister);
1615             m_assembler.add_S(dest, op1, dataTempRegister);
1616         }
1617         return Jump(makeBranch(cond));
1618     }
1619
1620     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1621     {
1622         return branchAdd32(cond, dest, src, dest);
1623     }
1624
1625     Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
1626     {
1627         load32(src, dataTempRegister);
1628         return branchAdd32(cond, dest, dataTempRegister, dest);
1629     }
1630
1631     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1632     {
1633         return branchAdd32(cond, dest, imm, dest);
1634     }
1635
1636     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
1637     {
1638         // Move the address into addressTempRegister,
1639         // and load the current value into dataTempRegister.
1640         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1641         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1642
1643         // Do the add.
1644         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1645         if (armImm.isValid())
1646             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
1647         else {
1648             // If the operand does not fit into an immediate then load it temporarily
1649             // into addressTempRegister; since we're overwriting addressTempRegister
1650             // we'll need to reload the address into it afterwards.
1651             move(imm, addressTempRegister);
1652             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
1653             move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1654         }
1655
1656         // Store the result.
1657         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1658
1659         return Jump(makeBranch(cond));
1660     }
1661
1662     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1663     {
1664         m_assembler.smull(dest, dataTempRegister, src1, src2);
1665
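        // smull left the low word of the 64-bit product in dest and the high word in
        // dataTempRegister. The multiply overflowed 32 bits iff the high word differs from the
        // sign-extension (asr #31) of the low word.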
1666         if (cond == Overflow) {
1667             m_assembler.asr(addressTempRegister, dest, 31);
1668             return branch32(NotEqual, addressTempRegister, dataTempRegister);
1669         }
1670
1671         return branchTest32(cond, dest);
1672     }
1673
1674     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1675     {
1676         return branchMul32(cond, src, dest, dest);
1677     }
1678
1679     Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
1680     {
1681         move(imm, dataTempRegister);
1682         return branchMul32(cond, dataTempRegister, src, dest);
1683     }
1684
1685     Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1686     {
1687         ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1688         m_assembler.sub_S(srcDest, zero, srcDest);
1689         return Jump(makeBranch(cond));
1690     }
1691
1692     Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1693     {
1694         m_assembler.orr_S(dest, dest, src);
1695         return Jump(makeBranch(cond));
1696     }
1697
1698     Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1699     {
1700         m_assembler.sub_S(dest, op1, op2);
1701         return Jump(makeBranch(cond));
1702     }
1703
1704     Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1705     {
1706         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1707         if (armImm.isValid())
1708             m_assembler.sub_S(dest, op1, armImm);
1709         else {
1710             move(imm, dataTempRegister);
1711             m_assembler.sub_S(dest, op1, dataTempRegister);
1712         }
1713         return Jump(makeBranch(cond));
1714     }
1715     
1716     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1717     {
1718         return branchSub32(cond, dest, src, dest);
1719     }
1720
1721     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1722     {
1723         return branchSub32(cond, dest, imm, dest);
1724     }
1725     
1726     void relativeTableJump(RegisterID index, int scale)
1727     {
1728         ASSERT(scale >= 0 && scale <= 31);
1729
1730         // dataTempRegister will point after the jump if the index register contains zero
1731         move(ARMRegisters::pc, dataTempRegister);
1732         m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
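        // The 9 is the distance from the value just read from pc (this mov's address + 4 in
        // Thumb state) to the instruction following the bx below, plus 1 to keep the Thumb bit
        // set (assuming the mov and bx are 16-bit encodings and the two adds are 32-bit).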
1733
1734         ShiftTypeAndAmount shift(SRType_LSL, scale);
1735         m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
1736         jump(dataTempRegister);
1737     }
1738
1739     // Miscellaneous operations:
1740
1741     void breakpoint(uint8_t imm = 0)
1742     {
1743         m_assembler.bkpt(imm);
1744     }
1745
1746     ALWAYS_INLINE Call nearCall()
1747     {
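        // Emit a fixed-width 8-byte placeholder for the target address; the real target is
        // filled in when the call is linked.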
1748         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1749         return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
1750     }
1751
1752     ALWAYS_INLINE Call nearTailCall()
1753     {
1754         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1755         return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
1756     }
1757
1758     ALWAYS_INLINE Call call()
1759     {
1760         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1761         return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
1762     }
1763
1764     ALWAYS_INLINE Call call(RegisterID target)
1765     {
1766         return Call(m_assembler.blx(target), Call::None);
1767     }
1768
1769     ALWAYS_INLINE Call call(Address address)
1770     {
1771         load32(address, dataTempRegister);
1772         return Call(m_assembler.blx(dataTempRegister), Call::None);
1773     }
1774
1775     ALWAYS_INLINE void ret()
1776     {
1777         m_assembler.bx(linkRegister);
1778     }
1779
1780     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1781     {
1782         m_assembler.cmp(left, right);
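        // IT/ELSE pair: the first mov executes when cond holds, the second when it does not,
        // leaving a 0/1 boolean in dest.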
1783         m_assembler.it(armV7Condition(cond), false);
1784         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1785         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1786     }
1787
1788     void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1789     {
1790         load32(left, dataTempRegister);
1791         compare32(cond, dataTempRegister, right, dest);
1792     }
1793
1794     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1795     {
1796         TrustedImm32 right8(static_cast<int8_t>(right.m_value));
1797         load8(left, addressTempRegister);
1798         compare32(cond, addressTempRegister, right8, dest);
1799     }
1800
1801     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1802     {
1803         compare32AndSetFlags(left, right);
1804         m_assembler.it(armV7Condition(cond), false);
1805         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1806         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1807     }
1808
1809     // FIXME:
1810     // The mask should be optional... perhaps the argument order should be
1811     // dest-src, operations always have a dest? ... possibly not true, considering
1812     // asm ops like test, or pseudo ops like pop().
1813     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1814     {
1815         load32(address, dataTempRegister);
1816         test32(dataTempRegister, mask);
1817         m_assembler.it(armV7Condition(cond), false);
1818         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1819         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1820     }
1821
1822     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1823     {
1824         TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
1825         load8(address, dataTempRegister);
1826         test32(dataTempRegister, mask8);
1827         m_assembler.it(armV7Condition(cond), false);
1828         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1829         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1830     }
1831
1832     ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
1833     {
1834         padBeforePatch();
1835         moveFixedWidthEncoding(imm, dst);
1836         return DataLabel32(this);
1837     }
1838
1839     ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
1840     {
1841         padBeforePatch();
1842         moveFixedWidthEncoding(TrustedImm32(imm), dst);
1843         return DataLabelPtr(this);
1844     }
1845
1846     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1847     {
1848         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1849         return branch32(cond, left, dataTempRegister);
1850     }
1851
1852     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1853     {
1854         load32(left, addressTempRegister);
1855         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1856         return branch32(cond, addressTempRegister, dataTempRegister);
1857     }
1858     
1859     ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1860     {
1861         load32(left, addressTempRegister);
1862         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1863         return branch32(cond, addressTempRegister, dataTempRegister);
1864     }
1865     
1866     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
1867     {
1868         m_makeJumpPatchable = true;
1869         Jump result = branch32(cond, left, TrustedImm32(right));
1870         m_makeJumpPatchable = false;
1871         return PatchableJump(result);
1872     }
1873     
1874     PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1875     {
1876         m_makeJumpPatchable = true;
1877         Jump result = branchTest32(cond, reg, mask);
1878         m_makeJumpPatchable = false;
1879         return PatchableJump(result);
1880     }
1881
1882     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
1883     {
1884         m_makeJumpPatchable = true;
1885         Jump result = branch32(cond, reg, imm);
1886         m_makeJumpPatchable = false;
1887         return PatchableJump(result);
1888     }
1889
1890     PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
1891     {
1892         m_makeJumpPatchable = true;
1893         Jump result = branch32(cond, left, imm);
1894         m_makeJumpPatchable = false;
1895         return PatchableJump(result);
1896     }
1897
1898     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1899     {
1900         m_makeJumpPatchable = true;
1901         Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
1902         m_makeJumpPatchable = false;
1903         return PatchableJump(result);
1904     }
1905
1906     PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1907     {
1908         m_makeJumpPatchable = true;
1909         Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
1910         m_makeJumpPatchable = false;
1911         return PatchableJump(result);
1912     }
1913
1914     PatchableJump patchableJump()
1915     {
1916         padBeforePatch();
1917         m_makeJumpPatchable = true;
1918         Jump result = jump();
1919         m_makeJumpPatchable = false;
1920         return PatchableJump(result);
1921     }
1922
1923     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1924     {
1925         DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1926         store32(dataTempRegister, address);
1927         return label;
1928     }
1929     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1930
1931
1932     ALWAYS_INLINE Call tailRecursiveCall()
1933     {
1934         // Like a normal call, but don't link.
1935         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1936         return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
1937     }
1938
1939     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1940     {
1941         oldJump.link(this);
1942         return tailRecursiveCall();
1943     }
1944
1945     
1946     static FunctionPtr readCallTarget(CodeLocationCall call)
1947     {
1948         return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
1949     }
1950     
1951     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
1952     static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
1953     
1954     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
1955     {
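        // Rewind over the two 32-bit instructions (movw/movt) that moveWithPatch emitted for
        // the right-hand value of the compare.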
1956         const unsigned twoWordOpSize = 4;
1957         return label.labelAtOffset(-twoWordOpSize * 2);
1958     }
1959     
1960     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
1961     {
1962 #if OS(LINUX)
1963         ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
1964 #else
1965         UNUSED_PARAM(rd);
1966         ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
1967 #endif
1968     }
1969     
1970     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
1971     {
1972         UNREACHABLE_FOR_PLATFORM();
1973         return CodeLocationLabel();
1974     }
1975     
1976     static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
1977     {
1978         UNREACHABLE_FOR_PLATFORM();
1979         return CodeLocationLabel();
1980     }
1981     
1982     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
1983     {
1984         UNREACHABLE_FOR_PLATFORM();
1985     }
1986
1987     static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
1988     {
1989         UNREACHABLE_FOR_PLATFORM();
1990     }
1991
1992     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1993     {
1994         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1995     }
1996
1997     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1998     {
1999         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
2000     }
2001
2002 #if ENABLE(MASM_PROBE)
2003     void probe(ProbeFunction, void* arg1, void* arg2);
2004 #endif // ENABLE(MASM_PROBE)
2005
2006 protected:
2007     ALWAYS_INLINE Jump jump()
2008     {
2009         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
2010         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2011         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
2012     }
2013
2014     ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
2015     {
2016         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
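        // ITTT block: the movw/movt pair emitted by moveFixedWidthEncoding and the bx below
        // are all conditional on cond, so the jump sequence is skipped when the condition fails.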
2017         m_assembler.it(cond, true, true);
2018         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2019         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
2020     }
2021     ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
2022     ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
2023     ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
2024
2025     ArmAddress setupArmAddress(BaseIndex address)
2026     {
2027         if (address.offset) {
2028             ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2029             if (imm.isValid())
2030                 m_assembler.add(addressTempRegister, address.base, imm);
2031             else {
2032                 move(TrustedImm32(address.offset), addressTempRegister);
2033                 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2034             }
2035
2036             return ArmAddress(addressTempRegister, address.index, address.scale);
2037         } else
2038             return ArmAddress(address.base, address.index, address.scale);
2039     }
2040
2041     ArmAddress setupArmAddress(Address address)
2042     {
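        // Offsets in [-0xff, 0xfff] can be encoded directly in a Thumb-2 load/store immediate;
        // anything larger is materialized into addressTempRegister first.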
2043         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2044             return ArmAddress(address.base, address.offset);
2045
2046         move(TrustedImm32(address.offset), addressTempRegister);
2047         return ArmAddress(address.base, addressTempRegister);
2048     }
2049
2050     ArmAddress setupArmAddress(ImplicitAddress address)
2051     {
2052         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2053             return ArmAddress(address.base, address.offset);
2054
2055         move(TrustedImm32(address.offset), addressTempRegister);
2056         return ArmAddress(address.base, addressTempRegister);
2057     }
2058
2059     RegisterID makeBaseIndexBase(BaseIndex address)
2060     {
2061         if (!address.offset)
2062             return address.base;
2063
2064         ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2065         if (imm.isValid())
2066             m_assembler.add(addressTempRegister, address.base, imm);
2067         else {
2068             move(TrustedImm32(address.offset), addressTempRegister);
2069             m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2070         }
2071
2072         return addressTempRegister;
2073     }
2074
2075     void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
2076     {
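        // Always emit a movw/movt pair so the constant occupies a fixed 8 bytes of code,
        // which is what the linking and patching code relies on when rewriting it later.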
2077         uint32_t value = imm.m_value;
2078         m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
2079         m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
2080     }
2081
2082     ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
2083     {
2084         return static_cast<ARMv7Assembler::Condition>(cond);
2085     }
2086
2087     ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
2088     {
2089         return static_cast<ARMv7Assembler::Condition>(cond);
2090     }
2091
2092     ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
2093     {
2094         return static_cast<ARMv7Assembler::Condition>(cond);
2095     }
2096     
2097 private:
2098     friend class LinkBuffer;
2099
2100     static void linkCall(void* code, Call call, FunctionPtr function)
2101     {
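        // Tail calls were emitted as a plain bx, so they are linked as jumps rather than calls.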
2102         if (call.isFlagSet(Call::Tail))
2103             ARMv7Assembler::linkJump(code, call.m_label, function.value());
2104         else
2105             ARMv7Assembler::linkCall(code, call.m_label, function.value());
2106     }
2107
2108 #if ENABLE(MASM_PROBE)
2109     inline TrustedImm32 trustedImm32FromPtr(void* ptr)
2110     {
2111         return TrustedImm32(TrustedImmPtr(ptr));
2112     }
2113
2114     inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
2115     {
2116         return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2117     }
2118
2119     inline TrustedImm32 trustedImm32FromPtr(void (*function)())
2120     {
2121         return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
2122     }
2123 #endif
2124
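    // When set, jumps are emitted with a fixed-size encoding so the patchable branches above
    // can be safely repatched later.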
2125     bool m_makeJumpPatchable;
2126 };
2127
2128 } // namespace JSC
2129
2130 #endif // ENABLE(ASSEMBLER)
2131
2132 #endif // MacroAssemblerARMv7_h