Enhance the MacroAssembler and LinkBuffer to support pointer profiling.
Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
1 /*
2  * Copyright (C) 2009-2018 Apple Inc. All rights reserved.
3  * Copyright (C) 2010 University of Szeged
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25  */
26
27 #pragma once
28
29 #if ENABLE(ASSEMBLER)
30
31 #include "ARMv7Assembler.h"
32 #include "AbstractMacroAssembler.h"
33
34 namespace JSC {
35
36 using Assembler = TARGET_ASSEMBLER;
37
38 class MacroAssemblerARMv7 : public AbstractMacroAssembler<Assembler> {
39     static const RegisterID dataTempRegister = ARMRegisters::ip;
40     static const RegisterID addressTempRegister = ARMRegisters::r6;
41
42     static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
43     inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
44
45 public:
46     static const unsigned numGPRs = 16;
47     static const unsigned numFPRs = 16;
48     
49     MacroAssemblerARMv7()
50         : m_makeJumpPatchable(false)
51     {
52     }
53
54     typedef ARMv7Assembler::LinkRecord LinkRecord;
55     typedef ARMv7Assembler::JumpType JumpType;
56     typedef ARMv7Assembler::JumpLinkType JumpLinkType;
57     typedef ARMv7Assembler::Condition Condition;
58
59     static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
60     static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
61
62     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
63     {
64         return value >= -255 && value <= 255;
65     }
66
67     Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
68     void* unlinkedCode() { return m_assembler.unlinkedCode(); }
69     static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
70     static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
71     static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
72     static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
73     static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); }
74
75     struct ArmAddress {
76         enum AddressType {
77             HasOffset,
78             HasIndex,
79         } type;
80         RegisterID base;
81         union {
82             int32_t offset;
83             struct {
84                 RegisterID index;
85                 Scale scale;
86             };
87         } u;
88         
89         explicit ArmAddress(RegisterID base, int32_t offset = 0)
90             : type(HasOffset)
91             , base(base)
92         {
93             u.offset = offset;
94         }
95         
96         explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
97             : type(HasIndex)
98             , base(base)
99         {
100             u.index = index;
101             u.scale = scale;
102         }
103     };
104     
105 public:
106     static const Scale ScalePtr = TimesFour;
107
108     enum RelationalCondition {
109         Equal = ARMv7Assembler::ConditionEQ,
110         NotEqual = ARMv7Assembler::ConditionNE,
111         Above = ARMv7Assembler::ConditionHI,
112         AboveOrEqual = ARMv7Assembler::ConditionHS,
113         Below = ARMv7Assembler::ConditionLO,
114         BelowOrEqual = ARMv7Assembler::ConditionLS,
115         GreaterThan = ARMv7Assembler::ConditionGT,
116         GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
117         LessThan = ARMv7Assembler::ConditionLT,
118         LessThanOrEqual = ARMv7Assembler::ConditionLE
119     };
120
121     enum ResultCondition {
122         Overflow = ARMv7Assembler::ConditionVS,
123         Signed = ARMv7Assembler::ConditionMI,
124         PositiveOrZero = ARMv7Assembler::ConditionPL,
125         Zero = ARMv7Assembler::ConditionEQ,
126         NonZero = ARMv7Assembler::ConditionNE
127     };
128
129     enum DoubleCondition {
130         // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
131         DoubleEqual = ARMv7Assembler::ConditionEQ,
132         DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! Check for this and handle it differently.
133         DoubleGreaterThan = ARMv7Assembler::ConditionGT,
134         DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
135         DoubleLessThan = ARMv7Assembler::ConditionLO,
136         DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
137         // If either operand is NaN, these conditions always evaluate to true.
138         DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! Check for this and handle it differently.
139         DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
140         DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
141         DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
142         DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
143         DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
144     };
145
146     static const RegisterID stackPointerRegister = ARMRegisters::sp;
147     static const RegisterID framePointerRegister = ARMRegisters::fp;
148     static const RegisterID linkRegister = ARMRegisters::lr;
149
150     // Integer arithmetic operations:
151     //
152     // Operations are typically two-operand - operation(source, srcDst).
153     // For many operations the source may be a TrustedImm32, and the srcDst operand
154     // may often be a memory location (explicitly described using an Address
155     // object).
156
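    // Illustrative usage (assuming a MacroAssemblerARMv7 instance named 'jit', with r0/r1 free):
    //
    //     jit.add32(ARMRegisters::r1, ARMRegisters::r0);                   // r0 = r0 + r1
    //     jit.add32(TrustedImm32(42), ARMRegisters::r0);                   // r0 = r0 + 42
    //     jit.add32(TrustedImm32(8), ARMRegisters::r1, ARMRegisters::r0);  // r0 = r1 + 8
    //     jit.add32(TrustedImm32(1), Address(ARMRegisters::r1, 4));        // [r1 + 4] += 1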
157     void add32(RegisterID src, RegisterID dest)
158     {
159         m_assembler.add(dest, dest, src);
160     }
161
162     void add32(RegisterID left, RegisterID right, RegisterID dest)
163     {
164         m_assembler.add(dest, left, right);
165     }
166
167     void add32(TrustedImm32 imm, RegisterID dest)
168     {
169         add32(imm, dest, dest);
170     }
171     
172     void add32(AbsoluteAddress src, RegisterID dest)
173     {
174         load32(src.m_ptr, dataTempRegister);
175         add32(dataTempRegister, dest);
176     }
177
178     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
179     {
180         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
181
182         // For adds with the stack pointer as the destination, the src is first moved
183         // into sp to avoid generating an unpredictable instruction.
184         if (dest == ARMRegisters::sp && src != dest) {
185             move(src, ARMRegisters::sp);
186             src = ARMRegisters::sp;
187         }
188
189         if (armImm.isValid())
190             m_assembler.add(dest, src, armImm);
191         else {
192             move(imm, dataTempRegister);
193             m_assembler.add(dest, src, dataTempRegister);
194         }
195     }
196
197     void add32(TrustedImm32 imm, Address address)
198     {
199         load32(address, dataTempRegister);
200
201         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
202         if (armImm.isValid())
203             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
204         else {
205             // Hrrrm, since dataTempRegister holds the data loaded,
206             // use addressTempRegister to hold the immediate.
207             move(imm, addressTempRegister);
208             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
209         }
210
211         store32(dataTempRegister, address);
212     }
213
214     void add32(Address src, RegisterID dest)
215     {
216         load32(src, dataTempRegister);
217         add32(dataTempRegister, dest);
218     }
219
220     void add32(TrustedImm32 imm, AbsoluteAddress address)
221     {
222         load32(address.m_ptr, dataTempRegister);
223
224         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
225         if (armImm.isValid())
226             m_assembler.add(dataTempRegister, dataTempRegister, armImm);
227         else {
228             // Hrrrm, since dataTempRegister holds the data loaded,
229             // use addressTempRegister to hold the immediate.
230             move(imm, addressTempRegister);
231             m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
232         }
233
234         store32(dataTempRegister, address.m_ptr);
235     }
236
237     void getEffectiveAddress(BaseIndex address, RegisterID dest)
238     {
239         m_assembler.lsl(addressTempRegister, address.index, static_cast<int>(address.scale));
240         m_assembler.add(dest, address.base, addressTempRegister);
241         if (address.offset)
242             add32(TrustedImm32(address.offset), dest);
243     }
244
245     void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
246     {
247         add32(imm, srcDest);
248     }
249     
250     void add64(TrustedImm32 imm, AbsoluteAddress address)
251     {
252         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
253
254         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
255         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
256         if (armImm.isValid())
257             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
258         else {
259             move(imm, addressTempRegister);
260             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
261             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
262         }
263         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
264
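        // The low word has been stored; now add the high word. imm.m_value >> 31 is the
        // sign extension of the 32-bit immediate (0 or -1), so the adc below folds in both
        // the immediate's high word and the carry produced by add_S above.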
265         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
266         m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
267         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
268     }
269
270     void and32(RegisterID op1, RegisterID op2, RegisterID dest)
271     {
272         m_assembler.ARM_and(dest, op1, op2);
273     }
274
275     void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
276     {
277         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
278         if (armImm.isValid())
279             m_assembler.ARM_and(dest, src, armImm);
280         else {
281             move(imm, dataTempRegister);
282             m_assembler.ARM_and(dest, src, dataTempRegister);
283         }
284     }
285
286     void and32(RegisterID src, RegisterID dest)
287     {
288         and32(dest, src, dest);
289     }
290
291     void and32(TrustedImm32 imm, RegisterID dest)
292     {
293         and32(imm, dest, dest);
294     }
295
296     void and32(Address src, RegisterID dest)
297     {
298         load32(src, dataTempRegister);
299         and32(dataTempRegister, dest);
300     }
301
302     void countLeadingZeros32(RegisterID src, RegisterID dest)
303     {
304         m_assembler.clz(dest, src);
305     }
306
307     void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
308     {
309         // Clamp the shift to the range 0..31
310         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
311         ASSERT(armImm.isValid());
312         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
313
314         m_assembler.lsl(dest, src, dataTempRegister);
315     }
316
317     void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
318     {
319         m_assembler.lsl(dest, src, imm.m_value & 0x1f);
320     }
321
322     void lshift32(RegisterID shiftAmount, RegisterID dest)
323     {
324         lshift32(dest, shiftAmount, dest);
325     }
326
327     void lshift32(TrustedImm32 imm, RegisterID dest)
328     {
329         lshift32(dest, imm, dest);
330     }
331
332     void mul32(RegisterID src, RegisterID dest)
333     {
334         m_assembler.smull(dest, dataTempRegister, dest, src);
335     }
336
337     void mul32(RegisterID left, RegisterID right, RegisterID dest)
338     {
339         m_assembler.smull(dest, dataTempRegister, left, right);
340     }
341
342     void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
343     {
344         move(imm, dataTempRegister);
345         m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
346     }
347
348     void neg32(RegisterID srcDest)
349     {
350         m_assembler.neg(srcDest, srcDest);
351     }
352
353     void neg32(RegisterID src, RegisterID dest)
354     {
355         m_assembler.neg(dest, src);
356     }
357
358     void or32(RegisterID src, RegisterID dest)
359     {
360         m_assembler.orr(dest, dest, src);
361     }
362     
363     void or32(RegisterID src, AbsoluteAddress dest)
364     {
365         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
366         load32(addressTempRegister, dataTempRegister);
367         or32(src, dataTempRegister);
368         store32(dataTempRegister, addressTempRegister);
369     }
370
371     void or32(TrustedImm32 imm, AbsoluteAddress address)
372     {
373         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
374         if (armImm.isValid()) {
375             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
376             load32(addressTempRegister, dataTempRegister);
377             m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
378             store32(dataTempRegister, addressTempRegister);
379         } else {
380             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
381             load32(addressTempRegister, dataTempRegister);
382             move(imm, addressTempRegister);
383             m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
384             move(TrustedImmPtr(address.m_ptr), addressTempRegister);
385             store32(dataTempRegister, addressTempRegister);
386         }
387     }
388
389     void or32(TrustedImm32 imm, Address address)
390     {
391         load32(address, dataTempRegister);
392         or32(imm, dataTempRegister, dataTempRegister);
393         store32(dataTempRegister, address);
394     }
395
396     void or32(TrustedImm32 imm, RegisterID dest)
397     {
398         or32(imm, dest, dest);
399     }
400
401     void or32(RegisterID op1, RegisterID op2, RegisterID dest)
402     {
403         m_assembler.orr(dest, op1, op2);
404     }
405
406     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
407     {
408         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
409         if (armImm.isValid())
410             m_assembler.orr(dest, src, armImm);
411         else {
412             ASSERT(src != dataTempRegister);
413             move(imm, dataTempRegister);
414             m_assembler.orr(dest, src, dataTempRegister);
415         }
416     }
417
418     void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
419     {
420         // Clamp the shift to the range 0..31
421         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
422         ASSERT(armImm.isValid());
423         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
424
425         m_assembler.asr(dest, src, dataTempRegister);
426     }
427
428     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
429     {
430         if (!imm.m_value)
431             move(src, dest);
432         else
433             m_assembler.asr(dest, src, imm.m_value & 0x1f);
434     }
435
436     void rshift32(RegisterID shiftAmount, RegisterID dest)
437     {
438         rshift32(dest, shiftAmount, dest);
439     }
440     
441     void rshift32(TrustedImm32 imm, RegisterID dest)
442     {
443         rshift32(dest, imm, dest);
444     }
445
446     void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
447     {
448         // Clamp the shift to the range 0..31
449         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
450         ASSERT(armImm.isValid());
451         m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
452         
453         m_assembler.lsr(dest, src, dataTempRegister);
454     }
455     
456     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
457     {
458         if (!imm.m_value)
459             move(src, dest);
460         else
461             m_assembler.lsr(dest, src, imm.m_value & 0x1f);
462     }
463
464     void urshift32(RegisterID shiftAmount, RegisterID dest)
465     {
466         urshift32(dest, shiftAmount, dest);
467     }
468     
469     void urshift32(TrustedImm32 imm, RegisterID dest)
470     {
471         urshift32(dest, imm, dest);
472     }
473
474     void sub32(RegisterID src, RegisterID dest)
475     {
476         m_assembler.sub(dest, dest, src);
477     }
478
479     void sub32(RegisterID left, RegisterID right, RegisterID dest)
480     {
481         m_assembler.sub(dest, left, right);
482     }
483
484     void sub32(TrustedImm32 imm, RegisterID dest)
485     {
486         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
487         if (armImm.isValid())
488             m_assembler.sub(dest, dest, armImm);
489         else {
490             move(imm, dataTempRegister);
491             m_assembler.sub(dest, dest, dataTempRegister);
492         }
493     }
494
495     void sub32(TrustedImm32 imm, Address address)
496     {
497         load32(address, dataTempRegister);
498
499         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
500         if (armImm.isValid())
501             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
502         else {
503             // Hrrrm, since dataTempRegister holds the data loaded,
504             // use addressTempRegister to hold the immediate.
505             move(imm, addressTempRegister);
506             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
507         }
508
509         store32(dataTempRegister, address);
510     }
511
512     void sub32(Address src, RegisterID dest)
513     {
514         load32(src, dataTempRegister);
515         sub32(dataTempRegister, dest);
516     }
517
518     void sub32(TrustedImm32 imm, AbsoluteAddress address)
519     {
520         load32(address.m_ptr, dataTempRegister);
521
522         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
523         if (armImm.isValid())
524             m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
525         else {
526             // Hrrrm, since dataTempRegister holds the data loaded,
527             // use addressTempRegister to hold the immediate.
528             move(imm, addressTempRegister);
529             m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
530         }
531
532         store32(dataTempRegister, address.m_ptr);
533     }
534
535     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
536     {
537         m_assembler.eor(dest, op1, op2);
538     }
539
540     void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
541     {
542         if (imm.m_value == -1) {
543             m_assembler.mvn(dest, src);
544             return;
545         }
546
547         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
548         if (armImm.isValid())
549             m_assembler.eor(dest, src, armImm);
550         else {
551             move(imm, dataTempRegister);
552             m_assembler.eor(dest, src, dataTempRegister);
553         }
554     }
555
556     void xor32(RegisterID src, RegisterID dest)
557     {
558         xor32(dest, src, dest);
559     }
560
561     void xor32(Address src, RegisterID dest)
562     {
563         load32(src, dataTempRegister);
564         xor32(dataTempRegister, dest);
565     }
566
567     void xor32(TrustedImm32 imm, RegisterID dest)
568     {
569         if (imm.m_value == -1)
570             m_assembler.mvn(dest, dest);
571         else
572             xor32(imm, dest, dest);
573     }
574     
575
576     // Memory access operations:
577     //
578     // Loads are of the form load(address, destination) and stores of the form
579     // store(source, address).  The source for a store may be a TrustedImm32.  Address
580     // operands to loads and stores will be implicitly constructed if a
581     // register is passed.
582
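    // Illustrative usage (assuming a MacroAssemblerARMv7 instance named 'jit'):
    //
    //     jit.load32(Address(ARMRegisters::r0, 8), ARMRegisters::r1);                      // r1 = [r0 + 8]
    //     jit.load8(BaseIndex(ARMRegisters::r0, ARMRegisters::r2, TimesOne), ARMRegisters::r1);
    //     jit.store32(TrustedImm32(0), Address(ARMRegisters::r0, 8));                      // [r0 + 8] = 0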
583 private:
584     void load32(ArmAddress address, RegisterID dest)
585     {
586         if (address.type == ArmAddress::HasIndex)
587             m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
588         else if (address.u.offset >= 0) {
589             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
590             ASSERT(armImm.isValid());
591             m_assembler.ldr(dest, address.base, armImm);
592         } else {
593             ASSERT(address.u.offset >= -255);
594             m_assembler.ldr(dest, address.base, address.u.offset, true, false);
595         }
596     }
597
598     void load16(ArmAddress address, RegisterID dest)
599     {
600         if (address.type == ArmAddress::HasIndex)
601             m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
602         else if (address.u.offset >= 0) {
603             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
604             ASSERT(armImm.isValid());
605             m_assembler.ldrh(dest, address.base, armImm);
606         } else {
607             ASSERT(address.u.offset >= -255);
608             m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
609         }
610     }
611     
612     void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
613     {
614         ASSERT(address.type == ArmAddress::HasIndex);
615         m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
616     }
617
618     void load8(ArmAddress address, RegisterID dest)
619     {
620         if (address.type == ArmAddress::HasIndex)
621             m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
622         else if (address.u.offset >= 0) {
623             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
624             ASSERT(armImm.isValid());
625             m_assembler.ldrb(dest, address.base, armImm);
626         } else {
627             ASSERT(address.u.offset >= -255);
628             m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
629         }
630     }
631     
632     void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
633     {
634         ASSERT(address.type == ArmAddress::HasIndex);
635         m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
636     }
637
638 protected:
639     void store32(RegisterID src, ArmAddress address)
640     {
641         if (address.type == ArmAddress::HasIndex)
642             m_assembler.str(src, address.base, address.u.index, address.u.scale);
643         else if (address.u.offset >= 0) {
644             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
645             ASSERT(armImm.isValid());
646             m_assembler.str(src, address.base, armImm);
647         } else {
648             ASSERT(address.u.offset >= -255);
649             m_assembler.str(src, address.base, address.u.offset, true, false);
650         }
651     }
652
653 private:
654     void store8(RegisterID src, ArmAddress address)
655     {
656         if (address.type == ArmAddress::HasIndex)
657             m_assembler.strb(src, address.base, address.u.index, address.u.scale);
658         else if (address.u.offset >= 0) {
659             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
660             ASSERT(armImm.isValid());
661             m_assembler.strb(src, address.base, armImm);
662         } else {
663             ASSERT(address.u.offset >= -255);
664             m_assembler.strb(src, address.base, address.u.offset, true, false);
665         }
666     }
667     
668     void store16(RegisterID src, ArmAddress address)
669     {
670         if (address.type == ArmAddress::HasIndex)
671             m_assembler.strh(src, address.base, address.u.index, address.u.scale);
672         else if (address.u.offset >= 0) {
673             ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
674             ASSERT(armImm.isValid());
675             m_assembler.strh(src, address.base, armImm);
676         } else {
677             ASSERT(address.u.offset >= -255);
678             m_assembler.strh(src, address.base, address.u.offset, true, false);
679         }
680     }
681
682 public:
683     void load32(ImplicitAddress address, RegisterID dest)
684     {
685         load32(setupArmAddress(address), dest);
686     }
687
688     void load32(BaseIndex address, RegisterID dest)
689     {
690         load32(setupArmAddress(address), dest);
691     }
692
693     void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
694     {
695         load32(setupArmAddress(address), dest);
696     }
697
698     void load16Unaligned(BaseIndex address, RegisterID dest)
699     {
700         load16(setupArmAddress(address), dest);
701     }
702
703     void load32(const void* address, RegisterID dest)
704     {
705         move(TrustedImmPtr(address), addressTempRegister);
706         m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
707     }
708     
709     void abortWithReason(AbortReason reason)
710     {
711         move(TrustedImm32(reason), dataTempRegister);
712         breakpoint();
713     }
714
715     void abortWithReason(AbortReason reason, intptr_t misc)
716     {
717         move(TrustedImm32(misc), addressTempRegister);
718         abortWithReason(reason);
719     }
720
721     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
722     {
723         ConvertibleLoadLabel result(this);
724         ASSERT(address.offset >= 0 && address.offset <= 255);
725         m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
726         return result;
727     }
728
729     void load8(ImplicitAddress address, RegisterID dest)
730     {
731         load8(setupArmAddress(address), dest);
732     }
733
734     void load8SignedExtendTo32(ImplicitAddress, RegisterID)
735     {
736         UNREACHABLE_FOR_PLATFORM();
737     }
738
739     void load8(BaseIndex address, RegisterID dest)
740     {
741         load8(setupArmAddress(address), dest);
742     }
743     
744     void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
745     {
746         load8SignedExtendTo32(setupArmAddress(address), dest);
747     }
748
749     void load8(const void* address, RegisterID dest)
750     {
751         move(TrustedImmPtr(address), dest);
752         load8(dest, dest);
753     }
754
755     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
756     {
757         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
758         load32(ArmAddress(address.base, dataTempRegister), dest);
759         return label;
760     }
761     
762     DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
763     {
764         padBeforePatch();
765
766         RegisterID base = address.base;
767         
768         DataLabelCompact label(this);
769         ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
770
771         m_assembler.ldr(dest, base, address.offset, true, false);
772         return label;
773     }
774
775     void load16(BaseIndex address, RegisterID dest)
776     {
777         m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
778     }
779     
780     void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
781     {
782         load16SignedExtendTo32(setupArmAddress(address), dest);
783     }
784     
785     void load16(ImplicitAddress address, RegisterID dest)
786     {
787         ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
788         if (armImm.isValid())
789             m_assembler.ldrh(dest, address.base, armImm);
790         else {
791             move(TrustedImm32(address.offset), dataTempRegister);
792             m_assembler.ldrh(dest, address.base, dataTempRegister);
793         }
794     }
795     
796     void load16SignedExtendTo32(ImplicitAddress, RegisterID)
797     {
798         UNREACHABLE_FOR_PLATFORM();
799     }
800
801     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
802     {
803         DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
804         store32(src, ArmAddress(address.base, dataTempRegister));
805         return label;
806     }
807
808     void store32(RegisterID src, ImplicitAddress address)
809     {
810         store32(src, setupArmAddress(address));
811     }
812
813     void store32(RegisterID src, BaseIndex address)
814     {
815         store32(src, setupArmAddress(address));
816     }
817
818     void store32(TrustedImm32 imm, ImplicitAddress address)
819     {
820         move(imm, dataTempRegister);
821         store32(dataTempRegister, setupArmAddress(address));
822     }
823
824     void store32(TrustedImm32 imm, BaseIndex address)
825     {
826         move(imm, dataTempRegister);
827         store32(dataTempRegister, setupArmAddress(address));
828     }
829
830     void store32(RegisterID src, const void* address)
831     {
832         move(TrustedImmPtr(address), addressTempRegister);
833         m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
834     }
835
836     void store32(TrustedImm32 imm, const void* address)
837     {
838         move(imm, dataTempRegister);
839         store32(dataTempRegister, address);
840     }
841
842     void store8(RegisterID src, Address address)
843     {
844         store8(src, setupArmAddress(address));
845     }
846     
847     void store8(RegisterID src, BaseIndex address)
848     {
849         store8(src, setupArmAddress(address));
850     }
851     
852     void store8(RegisterID src, void* address)
853     {
854         move(TrustedImmPtr(address), addressTempRegister);
855         store8(src, ArmAddress(addressTempRegister, 0));
856     }
857     
858     void store8(TrustedImm32 imm, void* address)
859     {
860         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
861         move(imm8, dataTempRegister);
862         store8(dataTempRegister, address);
863     }
864     
865     void store8(TrustedImm32 imm, Address address)
866     {
867         TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
868         move(imm8, dataTempRegister);
869         store8(dataTempRegister, address);
870     }
871     
872     void store16(RegisterID src, BaseIndex address)
873     {
874         store16(src, setupArmAddress(address));
875     }
876
877     // Possibly clobbers src, but not on this architecture.
878     void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
879     {
880         m_assembler.vmov(dest1, dest2, src);
881     }
882     
883     void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
884     {
885         UNUSED_PARAM(scratch);
886         m_assembler.vmov(dest, src1, src2);
887     }
888
889     static bool shouldBlindForSpecificArch(uint32_t value)
890     {
891         ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
892
893         // Couldn't be encoded as an immediate, so assume it's untrusted.
894         if (!immediate.isValid())
895             return true;
896         
897         // If we can encode the immediate, we have fewer than 16
898         // attacker-controlled bits.
899         if (immediate.isEncodedImm())
900             return false;
901
902         // Don't let any more than 12 bits of an instruction word
903         // be controlled by an attacker.
904         return !immediate.isUInt12();
905     }
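    // For example, 0xff000000 is expressible as a Thumb-2 encoded immediate, so it is not
    // blinded by the check above, whereas a value such as 0x12345678 cannot be encoded and
    // is treated as attacker-controlled.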
906
907     // Floating-point operations:
908
909     static bool supportsFloatingPoint() { return true; }
910     static bool supportsFloatingPointTruncate() { return true; }
911     static bool supportsFloatingPointSqrt() { return true; }
912     static bool supportsFloatingPointAbs() { return true; }
913     static bool supportsFloatingPointRounding() { return false; }
914
915     void loadDouble(ImplicitAddress address, FPRegisterID dest)
916     {
917         RegisterID base = address.base;
918         int32_t offset = address.offset;
919
920         // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, i.e. a word-aligned offset in the range +/-(255 * 4).
921         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
922             add32(TrustedImm32(offset), base, addressTempRegister);
923             base = addressTempRegister;
924             offset = 0;
925         }
926         
927         m_assembler.vldr(dest, base, offset);
928     }
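    // Illustrative usage (assuming a 'jit' instance): a word-aligned, in-range offset such as 8
    // is encoded directly in the vldr/vstr, while an out-of-range offset such as 4096 is first
    // folded into addressTempRegister as above.
    //
    //     jit.loadDouble(Address(ARMRegisters::r0, 8), ARMRegisters::d0);   // d0 = [r0 + 8]
    //     jit.addDouble(ARMRegisters::d1, ARMRegisters::d0);                // d0 += d1
    //     jit.storeDouble(ARMRegisters::d0, Address(ARMRegisters::r0, 8));  // [r0 + 8] = d0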
929
930     void loadFloat(ImplicitAddress address, FPRegisterID dest)
931     {
932         RegisterID base = address.base;
933         int32_t offset = address.offset;
934
935         // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, i.e. a word-aligned offset in the range +/-(255 * 4).
936         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
937             add32(TrustedImm32(offset), base, addressTempRegister);
938             base = addressTempRegister;
939             offset = 0;
940         }
941         
942         m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
943     }
944
945     void loadDouble(BaseIndex address, FPRegisterID dest)
946     {
947         move(address.index, addressTempRegister);
948         lshift32(TrustedImm32(address.scale), addressTempRegister);
949         add32(address.base, addressTempRegister);
950         loadDouble(Address(addressTempRegister, address.offset), dest);
951     }
952     
953     void loadFloat(BaseIndex address, FPRegisterID dest)
954     {
955         move(address.index, addressTempRegister);
956         lshift32(TrustedImm32(address.scale), addressTempRegister);
957         add32(address.base, addressTempRegister);
958         loadFloat(Address(addressTempRegister, address.offset), dest);
959     }
960
961     void moveDouble(FPRegisterID src, FPRegisterID dest)
962     {
963         if (src != dest)
964             m_assembler.vmov(dest, src);
965     }
966
967     void moveZeroToDouble(FPRegisterID reg)
968     {
969         static double zeroConstant = 0.;
970         loadDouble(TrustedImmPtr(&zeroConstant), reg);
971     }
972
973     void loadDouble(TrustedImmPtr address, FPRegisterID dest)
974     {
975         move(address, addressTempRegister);
976         m_assembler.vldr(dest, addressTempRegister, 0);
977     }
978
979     void storeDouble(FPRegisterID src, ImplicitAddress address)
980     {
981         RegisterID base = address.base;
982         int32_t offset = address.offset;
983
984         // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, i.e. a word-aligned offset in the range +/-(255 * 4).
985         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
986             add32(TrustedImm32(offset), base, addressTempRegister);
987             base = addressTempRegister;
988             offset = 0;
989         }
990         
991         m_assembler.vstr(src, base, offset);
992     }
993
994     void storeFloat(FPRegisterID src, ImplicitAddress address)
995     {
996         RegisterID base = address.base;
997         int32_t offset = address.offset;
998
999         // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, i.e. a word-aligned offset in the range +/-(255 * 4).
1000         if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
1001             add32(TrustedImm32(offset), base, addressTempRegister);
1002             base = addressTempRegister;
1003             offset = 0;
1004         }
1005         
1006         m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
1007     }
1008
1009     void storeDouble(FPRegisterID src, TrustedImmPtr address)
1010     {
1011         move(address, addressTempRegister);
1012         storeDouble(src, addressTempRegister);
1013     }
1014
1015     void storeDouble(FPRegisterID src, BaseIndex address)
1016     {
1017         move(address.index, addressTempRegister);
1018         lshift32(TrustedImm32(address.scale), addressTempRegister);
1019         add32(address.base, addressTempRegister);
1020         storeDouble(src, Address(addressTempRegister, address.offset));
1021     }
1022     
1023     void storeFloat(FPRegisterID src, BaseIndex address)
1024     {
1025         move(address.index, addressTempRegister);
1026         lshift32(TrustedImm32(address.scale), addressTempRegister);
1027         add32(address.base, addressTempRegister);
1028         storeFloat(src, Address(addressTempRegister, address.offset));
1029     }
1030     
1031     void addDouble(FPRegisterID src, FPRegisterID dest)
1032     {
1033         m_assembler.vadd(dest, dest, src);
1034     }
1035
1036     void addDouble(Address src, FPRegisterID dest)
1037     {
1038         loadDouble(src, fpTempRegister);
1039         addDouble(fpTempRegister, dest);
1040     }
1041
1042     void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1043     {
1044         m_assembler.vadd(dest, op1, op2);
1045     }
1046
1047     void addDouble(AbsoluteAddress address, FPRegisterID dest)
1048     {
1049         loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
1050         m_assembler.vadd(dest, dest, fpTempRegister);
1051     }
1052
1053     void divDouble(FPRegisterID src, FPRegisterID dest)
1054     {
1055         m_assembler.vdiv(dest, dest, src);
1056     }
1057
1058     void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1059     {
1060         m_assembler.vdiv(dest, op1, op2);
1061     }
1062
1063     void subDouble(FPRegisterID src, FPRegisterID dest)
1064     {
1065         m_assembler.vsub(dest, dest, src);
1066     }
1067
1068     void subDouble(Address src, FPRegisterID dest)
1069     {
1070         loadDouble(src, fpTempRegister);
1071         subDouble(fpTempRegister, dest);
1072     }
1073
1074     void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1075     {
1076         m_assembler.vsub(dest, op1, op2);
1077     }
1078
1079     void mulDouble(FPRegisterID src, FPRegisterID dest)
1080     {
1081         m_assembler.vmul(dest, dest, src);
1082     }
1083
1084     void mulDouble(Address src, FPRegisterID dest)
1085     {
1086         loadDouble(src, fpTempRegister);
1087         mulDouble(fpTempRegister, dest);
1088     }
1089
1090     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1091     {
1092         m_assembler.vmul(dest, op1, op2);
1093     }
1094
1095     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1096     {
1097         m_assembler.vsqrt(dest, src);
1098     }
1099     
1100     void absDouble(FPRegisterID src, FPRegisterID dest)
1101     {
1102         m_assembler.vabs(dest, src);
1103     }
1104
1105     void negateDouble(FPRegisterID src, FPRegisterID dest)
1106     {
1107         m_assembler.vneg(dest, src);
1108     }
1109
1110     NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
1111     {
1112         ASSERT(!supportsFloatingPointRounding());
1113         CRASH();
1114     }
1115
1116     NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
1117     {
1118         ASSERT(!supportsFloatingPointRounding());
1119         CRASH();
1120     }
1121
1122     NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
1123     {
1124         ASSERT(!supportsFloatingPointRounding());
1125         CRASH();
1126     }
1127
1128     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
1129     {
1130         m_assembler.vmov(fpTempRegister, src, src);
1131         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1132     }
1133
1134     void convertInt32ToDouble(Address address, FPRegisterID dest)
1135     {
1136         // FIXME: Load directly into the FPR!
1137         load32(address, dataTempRegister);
1138         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1139         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1140     }
1141
1142     void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1143     {
1144         // FIXME: Load directly into the FPR!
1145         load32(address.m_ptr, dataTempRegister);
1146         m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1147         m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1148     }
1149     
1150     void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
1151     {
1152         m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
1153     }
1154     
1155     void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
1156     {
1157         m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
1158     }
1159
1160     Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1161     {
1162         m_assembler.vcmp(left, right);
1163         m_assembler.vmrs();
1164
1165         if (cond == DoubleNotEqual) {
1166             // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
1167             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1168             Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1169             unordered.link(this);
1170             return result;
1171         }
1172         if (cond == DoubleEqualOrUnordered) {
1173             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1174             Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1175             unordered.link(this);
1176             // We get here if either unordered or equal.
1177             Jump result = jump();
1178             notEqual.link(this);
1179             return result;
1180         }
1181         return makeBranch(cond);
1182     }
1183
1184     enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1185     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1186     {
1187         // Convert into dest.
1188         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1189         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1190
1191         // Calculate 2x dest.  If the value potentially underflowed, it will have been
1192         // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
1193         // overflow the result will be equal to -2.
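        // (As 32-bit values: 0x80000000 * 2 == 0, while 0x7fffffff * 2 == 0xfffffffe == -2.)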
1194         Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
1195         Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
1196
1197         // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
1198         underflow.link(this);
1199         if (branchType == BranchIfTruncateSuccessful)
1200             return noOverflow;
1201
1202         // We'll reach the current point in the code on failure, so plant a
1203         // jump here & link the success case.
1204         Jump failure = jump();
1205         noOverflow.link(this);
1206         return failure;
1207     }
1208
1209     // Result is undefined if the value is outside of the integer range.
1210     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1211     {
1212         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1213         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1214     }
1215
1216     void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1217     {
1218         m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
1219         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1220     }
1221     
1222     // Convert 'src' to an integer, placing the result in 'dest'.
1223     // If the result is not representable as a 32 bit value, branch.
1224     // May also branch for some values that are representable in 32 bits
1225     // (specifically, in this case, 0).
1226     void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
1227     {
1228         m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1229         m_assembler.vmov(dest, fpTempRegisterAsSingle());
1230
1231         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1232         m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
1233         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1234
1235         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
1236         if (negZeroCheck)
1237             failureCases.append(branchTest32(Zero, dest));
1238     }
1239
1240     Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1241     {
1242         m_assembler.vcmpz(reg);
1243         m_assembler.vmrs();
1244         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1245         Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1246         unordered.link(this);
1247         return result;
1248     }
1249
1250     Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1251     {
1252         m_assembler.vcmpz(reg);
1253         m_assembler.vmrs();
1254         Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1255         Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1256         unordered.link(this);
1257         // We get here if either unordered or equal.
1258         Jump result = jump();
1259         notEqual.link(this);
1260         return result;
1261     }
1262
1263     // Stack manipulation operations:
1264     //
1265     // The ABI is assumed to provide a stack abstraction to memory,
1266     // containing machine-word-sized units of data.  Push and pop
1267     // operations add and remove a single register-sized unit of data
1268     // to or from the stack.  Peek and poke operations read or write
1269     // values on the stack, without moving the current stack position.
1270     
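    // Illustrative usage (assuming a 'jit' instance):
    //
    //     jit.push(ARMRegisters::r4);                        // sp -= 4; [sp] = r4
    //     jit.pushPair(ARMRegisters::r4, ARMRegisters::r5);  // one instruction for both registers
    //     jit.popPair(ARMRegisters::r4, ARMRegisters::r5);
    //     jit.pop(ARMRegisters::r4);                         // r4 = [sp]; sp += 4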
1271     void pop(RegisterID dest)
1272     {
1273         m_assembler.pop(dest);
1274     }
1275
1276     void push(RegisterID src)
1277     {
1278         m_assembler.push(src);
1279     }
1280
1281     void push(Address address)
1282     {
1283         load32(address, dataTempRegister);
1284         push(dataTempRegister);
1285     }
1286
1287     void push(TrustedImm32 imm)
1288     {
1289         move(imm, dataTempRegister);
1290         push(dataTempRegister);
1291     }
1292
1293     void popPair(RegisterID dest1, RegisterID dest2)
1294     {
1295         m_assembler.pop(1 << dest1 | 1 << dest2);
1296     }
1297     
1298     void pushPair(RegisterID src1, RegisterID src2)
1299     {
1300         m_assembler.push(1 << src1 | 1 << src2);
1301     }
1302     
1303     // Register move operations:
1304     //
1305     // Move values in registers.
1306
1307     void move(TrustedImm32 imm, RegisterID dest)
1308     {
1309         uint32_t value = imm.m_value;
1310
1311         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
1312
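        // Prefer a single MOV of an encoded immediate; failing that, try MVN of the bitwise
        // complement; otherwise fall back to MOVW, plus MOVT if the high half is non-zero.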
1313         if (armImm.isValid())
1314             m_assembler.mov(dest, armImm);
1315         else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
1316             m_assembler.mvn(dest, armImm);
1317         else {
1318             m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
1319             if (value & 0xffff0000)
1320                 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
1321         }
1322     }
1323
1324     void move(RegisterID src, RegisterID dest)
1325     {
1326         if (src != dest)
1327             m_assembler.mov(dest, src);
1328     }
1329
1330     void move(TrustedImmPtr imm, RegisterID dest)
1331     {
1332         move(TrustedImm32(imm), dest);
1333     }
1334
1335     void swap(RegisterID reg1, RegisterID reg2)
1336     {
1337         move(reg1, dataTempRegister);
1338         move(reg2, reg1);
1339         move(dataTempRegister, reg2);
1340     }
1341
1342     void signExtend32ToPtr(RegisterID src, RegisterID dest)
1343     {
1344         move(src, dest);
1345     }
1346
1347     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1348     {
1349         move(src, dest);
1350     }
1351
1352     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
1353     static RelationalCondition invert(RelationalCondition cond)
1354     {
1355         return static_cast<RelationalCondition>(cond ^ 1);
1356     }
1357
1358     void nop()
1359     {
1360         m_assembler.nop();
1361     }
1362     
1363     void memoryFence()
1364     {
1365         m_assembler.dmbSY();
1366     }
1367     
1368     void storeFence()
1369     {
1370         m_assembler.dmbISHST();
1371     }
1372
1373     static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
1374     {
1375         ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
1376     }
1377     
1378     static ptrdiff_t maxJumpReplacementSize()
1379     {
1380         return ARMv7Assembler::maxJumpReplacementSize();
1381     }
1382
1383     static ptrdiff_t patchableJumpSize()
1384     {
1385         return ARMv7Assembler::patchableJumpSize();
1386     }
1387
1388     // Forwards / external control flow operations:
1389     //
1390     // This set of jump and conditional branch operations returns a Jump
1391     // object which may be linked at a later point, allowing forward jumps,
1392     // or jumps that will require external linkage (after the code has been
1393     // relocated).
1394     //
1395     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1396     // respectively; for unsigned comparisons the names b, a, be, and ae are
1397     // used (representing the names 'below' and 'above').
1398     //
1399     // Operands to the comparison are provided in the expected order, e.g.
1400     // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1401     // treated as a signed 32-bit value, is less than or equal to 5.
1402     //
1403     // jz and jnz test whether the first operand is equal to zero, and take
1404     // an optional second operand of a mask under which to perform the test.
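    // Illustrative usage (assuming a 'jit' instance): a branch returns a Jump that is linked
    // later, either to a point in the same code stream or externally through a LinkBuffer.
    //
    //     Jump slowPath = jit.branch32(GreaterThan, ARMRegisters::r0, TrustedImm32(100));
    //     jit.add32(TrustedImm32(1), ARMRegisters::r0);   // fast path
    //     Jump done = jit.jump();
    //     slowPath.link(&jit);                            // slow path code continues here
    //     done.link(&jit);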
1405 private:
1406
1407     // Should we be using TEQ for equal/not-equal?
1408     void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
1409     {
1410         int32_t imm = right.m_value;
1411         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1412         if (armImm.isValid())
1413             m_assembler.cmp(left, armImm);
1414         else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
1415             m_assembler.cmn(left, armImm);
1416         else {
1417             move(TrustedImm32(imm), dataTempRegister);
1418             m_assembler.cmp(left, dataTempRegister);
1419         }
1420     }
1421
1422 public:
1423     void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1424     {
1425         int32_t imm = mask.m_value;
1426
1427         if (imm == -1)
1428             m_assembler.tst(reg, reg);
1429         else {
1430             ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1431             if (armImm.isValid()) {
1432                 if (reg == ARMRegisters::sp) {
1433                     move(reg, addressTempRegister);
1434                     m_assembler.tst(addressTempRegister, armImm);
1435                 } else
1436                     m_assembler.tst(reg, armImm);
1437             } else {
1438                 move(mask, dataTempRegister);
1439                 if (reg == ARMRegisters::sp) {
1440                     move(reg, addressTempRegister);
1441                     m_assembler.tst(addressTempRegister, dataTempRegister);
1442                 } else
1443                     m_assembler.tst(reg, dataTempRegister);
1444             }
1445         }
1446     }
1447     
1448     Jump branch(ResultCondition cond)
1449     {
1450         return Jump(makeBranch(cond));
1451     }
1452
1453     Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1454     {
1455         m_assembler.cmp(left, right);
1456         return Jump(makeBranch(cond));
1457     }
1458
1459     Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1460     {
1461         compare32AndSetFlags(left, right);
1462         return Jump(makeBranch(cond));
1463     }
1464
1465     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1466     {
1467         load32(right, dataTempRegister);
1468         return branch32(cond, left, dataTempRegister);
1469     }
1470
1471     Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1472     {
1473         load32(left, dataTempRegister);
1474         return branch32(cond, dataTempRegister, right);
1475     }
1476
1477     Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1478     {
1479         // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1480         load32(left, addressTempRegister);
1481         return branch32(cond, addressTempRegister, right);
1482     }
1483
1484     Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1485     {
1486         // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1487         load32(left, addressTempRegister);
1488         return branch32(cond, addressTempRegister, right);
1489     }
1490
1491     Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1492     {
1493         // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1494         load32WithUnalignedHalfWords(left, addressTempRegister);
1495         return branch32(cond, addressTempRegister, right);
1496     }
1497
1498     Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1499     {
1500         load32(left.m_ptr, dataTempRegister);
1501         return branch32(cond, dataTempRegister, right);
1502     }
1503
1504     Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1505     {
1506         // Use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1507         load32(left.m_ptr, addressTempRegister);
1508         return branch32(cond, addressTempRegister, right);
1509     }
1510
1511     Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
1512     {
1513         load32(left, dataTempRegister);
1514         return branch32(cond, dataTempRegister, right);
1515     }
1516
1517     Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1518     {
1519         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
1520         compare32AndSetFlags(left, right8);
1521         return Jump(makeBranch(cond));
1522     }
1523
1524     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1525     {
1526         // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
1527         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
1528         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
1529         return branch8(cond, addressTempRegister, right8);
1530     }
1531
1532     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1533     {
1534         // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1535         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
1536         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
1537         return branch32(cond, addressTempRegister, right8);
1538     }
1539     
1540     Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
1541     {
1542         // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
1543         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
1544         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1545         MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
1546         return branch32(cond, addressTempRegister, right8);
1547     }
1548     
1549     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1550     {
1551         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1552         m_assembler.tst(reg, mask);
1553         return Jump(makeBranch(cond));
1554     }
1555
1556     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1557     {
1558         ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
1559         test32(reg, mask);
1560         return Jump(makeBranch(cond));
1561     }
1562
1563     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1564     {
1565         // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1566         load32(address, addressTempRegister);
1567         return branchTest32(cond, addressTempRegister, mask);
1568     }
1569
1570     Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1571     {
1572         // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1573         load32(address, addressTempRegister);
1574         return branchTest32(cond, addressTempRegister, mask);
1575     }
1576
1577     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1578     {
1579         // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1580         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
1581         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
1582         return branchTest32(cond, addressTempRegister, mask8);
1583     }
1584
1585     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1586     {
1587         // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1588         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
1589         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
1590         return branchTest32(cond, addressTempRegister, mask8);
1591     }
1592
1593     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1594     {
1595         // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1596         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
1597         move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1598         MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
1599         return branchTest32(cond, addressTempRegister, mask8);
1600     }
1601
1602     void jump(RegisterID target, PtrTag)
1603     {
1604         m_assembler.bx(target);
1605     }
1606
1607     // Address is a memory location containing the address to jump to
1608     void jump(Address address, PtrTag)
1609     {
1610         load32(address, dataTempRegister);
1611         m_assembler.bx(dataTempRegister);
1612     }
1613     
1614     void jump(AbsoluteAddress address, PtrTag)
1615     {
1616         move(TrustedImmPtr(address.m_ptr), dataTempRegister);
1617         load32(Address(dataTempRegister), dataTempRegister);
1618         m_assembler.bx(dataTempRegister);
1619     }
1620
1621
1622     // Arithmetic control flow operations:
1623     //
1624     // This set of conditional branch operations branch based
1625     // on the result of an arithmetic operation.  The operation
1626     // is performed as normal, storing the result.
1627     //
1628     // * jz operations branch if the result is zero.
1629     // * jo operations branch if the (signed) arithmetic
1630     //   operation caused an overflow to occur.
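         //
         // As a purely illustrative sketch (the `jit`, `srcReg` and `destReg` names below
         // are hypothetical, not part of this interface), a caller guarding an add against
         // signed overflow might write:
         //
         //     MacroAssemblerARMv7 jit;
         //     auto overflow = jit.branchAdd32(MacroAssemblerARMv7::Overflow, srcReg, destReg);
         //     // ... emit the fast path here ...
         //     overflow.link(&jit); // then emit the slow path for the overflowed case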
1631     
1632     Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1633     {
1634         m_assembler.add_S(dest, op1, op2);
1635         return Jump(makeBranch(cond));
1636     }
1637
1638     Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1639     {
1640         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1641         if (armImm.isValid())
1642             m_assembler.add_S(dest, op1, armImm);
1643         else {
1644             move(imm, dataTempRegister);
1645             m_assembler.add_S(dest, op1, dataTempRegister);
1646         }
1647         return Jump(makeBranch(cond));
1648     }
1649
1650     Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1651     {
1652         return branchAdd32(cond, dest, src, dest);
1653     }
1654
1655     Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
1656     {
1657         load32(src, dataTempRegister);
1658         return branchAdd32(cond, dest, dataTempRegister, dest);
1659     }
1660
1661     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1662     {
1663         return branchAdd32(cond, dest, imm, dest);
1664     }
1665
1666     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
1667     {
1668         // Move the address into addressTempRegister, and load the current
1669         // value into dataTempRegister.
1670         move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1671         m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1672
1673         // Do the add.
1674         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1675         if (armImm.isValid())
1676             m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
1677         else {
1678             // If the operand does not fit into an immediate then load it temporarily
1679             // into addressTempRegister; since we're overwriting addressTempRegister
1680             // we'll need to reload it with the address afterwards.
1681             move(imm, addressTempRegister);
1682             m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
1683             move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1684         }
1685
1686         // Store the result.
1687         m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1688
1689         return Jump(makeBranch(cond));
1690     }
1691
1692     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1693     {
1694         m_assembler.smull(dest, dataTempRegister, src1, src2);
1695
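             // smull leaves the low 32 bits of the 64-bit product in dest and the high 32
             // bits in dataTempRegister. The product fits in 32 bits exactly when the high
             // word equals the sign extension (asr #31) of the low word, so Overflow
             // branches when the two differ.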
1696         if (cond == Overflow) {
1697             m_assembler.asr(addressTempRegister, dest, 31);
1698             return branch32(NotEqual, addressTempRegister, dataTempRegister);
1699         }
1700
1701         return branchTest32(cond, dest);
1702     }
1703
1704     Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1705     {
1706         return branchMul32(cond, src, dest, dest);
1707     }
1708
1709     Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
1710     {
1711         move(imm, dataTempRegister);
1712         return branchMul32(cond, dataTempRegister, src, dest);
1713     }
1714
1715     Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1716     {
1717         ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1718         m_assembler.sub_S(srcDest, zero, srcDest);
1719         return Jump(makeBranch(cond));
1720     }
1721
1722     Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1723     {
1724         m_assembler.orr_S(dest, dest, src);
1725         return Jump(makeBranch(cond));
1726     }
1727
1728     Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1729     {
1730         m_assembler.sub_S(dest, op1, op2);
1731         return Jump(makeBranch(cond));
1732     }
1733
1734     Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1735     {
1736         ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1737         if (armImm.isValid())
1738             m_assembler.sub_S(dest, op1, armImm);
1739         else {
1740             move(imm, dataTempRegister);
1741             m_assembler.sub_S(dest, op1, dataTempRegister);
1742         }
1743         return Jump(makeBranch(cond));
1744     }
1745     
1746     Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1747     {
1748         return branchSub32(cond, dest, src, dest);
1749     }
1750
1751     Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1752     {
1753         return branchSub32(cond, dest, imm, dest);
1754     }
1755     
1756     void relativeTableJump(RegisterID index, int scale)
1757     {
1758         ASSERT(scale >= 0 && scale <= 31);
1759
1760         // dataTempRegister will point just past the jump if the index register contains zero.
1761         move(ARMRegisters::pc, dataTempRegister);
1762         m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
1763
1764         ShiftTypeAndAmount shift(SRType_LSL, scale);
1765         m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
1766         jump(dataTempRegister, NoPtrTag);
1767     }
1768
1769     // Miscellaneous operations:
1770
1771     void breakpoint(uint8_t imm = 0)
1772     {
1773         m_assembler.bkpt(imm);
1774     }
1775
1776     static bool isBreakpoint(void* address) { return ARMv7Assembler::isBkpt(address); }
1777
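         // The linkable call flavors below load a fixed-width movT3/movt placeholder into
         // dataTempRegister and branch through it; linkCall() and repatchCall() later
         // rewrite that constant with the real target.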
1778     ALWAYS_INLINE Call nearCall()
1779     {
1780         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1781         return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
1782     }
1783
1784     ALWAYS_INLINE Call nearTailCall()
1785     {
1786         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1787         return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
1788     }
1789
1790     ALWAYS_INLINE Call call(PtrTag)
1791     {
1792         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1793         return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
1794     }
1795
1796     ALWAYS_INLINE Call call(RegisterID target, PtrTag)
1797     {
1798         return Call(m_assembler.blx(target), Call::None);
1799     }
1800
1801     ALWAYS_INLINE Call call(Address address, PtrTag)
1802     {
1803         load32(address, dataTempRegister);
1804         return Call(m_assembler.blx(dataTempRegister), Call::None);
1805     }
1806
1807     ALWAYS_INLINE void ret()
1808     {
1809         m_assembler.bx(linkRegister);
1810     }
1811
1812     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1813     {
1814         m_assembler.cmp(left, right);
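             // it(cond, false) opens an ITE block: the first mov (dest = 1) executes when
             // cond holds, the second mov (dest = 0) executes otherwise.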
1815         m_assembler.it(armV7Condition(cond), false);
1816         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1817         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1818     }
1819
1820     void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1821     {
1822         load32(left, dataTempRegister);
1823         compare32(cond, dataTempRegister, right, dest);
1824     }
1825
1826     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1827     {
1828         TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
1829         MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
1830         compare32(cond, addressTempRegister, right8, dest);
1831     }
1832
1833     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1834     {
1835         compare32AndSetFlags(left, right);
1836         m_assembler.it(armV7Condition(cond), false);
1837         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1838         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1839     }
1840
1841     // FIXME:
1842     // The mask should be optional... perhaps the argument order should be
1843     // dest-src, operations always have a dest? ... possibly not true, considering
1844     // asm ops like test, or pseudo ops like pop().
1845     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1846     {
1847         load32(address, dataTempRegister);
1848         test32(dataTempRegister, mask);
1849         m_assembler.it(armV7Condition(cond), false);
1850         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1851         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1852     }
1853
1854     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1855     {
1856         TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
1857         MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
1858         test32(dataTempRegister, mask8);
1859         m_assembler.it(armV7Condition(cond), false);
1860         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1861         m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1862     }
1863
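         // moveWithPatch always materializes the immediate via moveFixedWidthEncoding (a
         // movT3/movt pair), so the returned label marks a constant that can be rewritten
         // in place once the code has been linked.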
1864     ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
1865     {
1866         padBeforePatch();
1867         moveFixedWidthEncoding(imm, dst);
1868         return DataLabel32(this);
1869     }
1870
1871     ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
1872     {
1873         padBeforePatch();
1874         moveFixedWidthEncoding(TrustedImm32(imm), dst);
1875         return DataLabelPtr(this);
1876     }
1877
1878     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
1879     {
1880         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1881         return branch32(cond, left, dataTempRegister);
1882     }
1883
1884     ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
1885     {
1886         load32(left, addressTempRegister);
1887         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1888         return branch32(cond, addressTempRegister, dataTempRegister);
1889     }
1890     
1891     ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1892     {
1893         load32(left, addressTempRegister);
1894         dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1895         return branch32(cond, addressTempRegister, dataTempRegister);
1896     }
1897     
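         // The patchable* variants set m_makeJumpPatchable around the underlying branch so
         // the emitted jump keeps its full-width, fixed-size encoding and stays safe to
         // repatch after link-time branch compaction.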
1898     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(nullptr))
1899     {
1900         m_makeJumpPatchable = true;
1901         Jump result = branch32(cond, left, TrustedImm32(right));
1902         m_makeJumpPatchable = false;
1903         return PatchableJump(result);
1904     }
1905     
1906     PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1907     {
1908         m_makeJumpPatchable = true;
1909         Jump result = branchTest32(cond, reg, mask);
1910         m_makeJumpPatchable = false;
1911         return PatchableJump(result);
1912     }
1913
1914     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
1915     {
1916         m_makeJumpPatchable = true;
1917         Jump result = branch32(cond, reg, imm);
1918         m_makeJumpPatchable = false;
1919         return PatchableJump(result);
1920     }
1921
1922     PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
1923     {
1924         m_makeJumpPatchable = true;
1925         Jump result = branch32(cond, left, imm);
1926         m_makeJumpPatchable = false;
1927         return PatchableJump(result);
1928     }
1929
1930     PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
1931     {
1932         m_makeJumpPatchable = true;
1933         Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
1934         m_makeJumpPatchable = false;
1935         return PatchableJump(result);
1936     }
1937
1938     PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
1939     {
1940         m_makeJumpPatchable = true;
1941         Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
1942         m_makeJumpPatchable = false;
1943         return PatchableJump(result);
1944     }
1945
1946     PatchableJump patchableJump()
1947     {
1948         padBeforePatch();
1949         m_makeJumpPatchable = true;
1950         Jump result = jump();
1951         m_makeJumpPatchable = false;
1952         return PatchableJump(result);
1953     }
1954
1955     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1956     {
1957         DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1958         store32(dataTempRegister, address);
1959         return label;
1960     }
1961     ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(nullptr), address); }
1962
1963
1964     ALWAYS_INLINE Call tailRecursiveCall()
1965     {
1966         // Like a normal call, but don't link.
1967         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1968         return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
1969     }
1970
1971     ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1972     {
1973         oldJump.link(this);
1974         return tailRecursiveCall();
1975     }
1976
1977     
1978     static FunctionPtr readCallTarget(CodeLocationCall call)
1979     {
1980         return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())), CodeEntryPtrTag);
1981     }
1982     
1983     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
1984     static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
1985     
1986     static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
1987     {
1988         const unsigned twoWordOpSize = 4;
1989         return label.labelAtOffset(-twoWordOpSize * 2);
1990     }
1991     
1992     static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
1993     {
1994 #if OS(LINUX)
1995         ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
1996 #else
1997         UNUSED_PARAM(rd);
1998         ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
1999 #endif
2000     }
2001     
2002     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2003     {
2004         UNREACHABLE_FOR_PLATFORM();
2005         return CodeLocationLabel();
2006     }
2007     
2008     static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
2009     {
2010         UNREACHABLE_FOR_PLATFORM();
2011         return CodeLocationLabel();
2012     }
2013     
2014     static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2015     {
2016         UNREACHABLE_FOR_PLATFORM();
2017     }
2018
2019     static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
2020     {
2021         UNREACHABLE_FOR_PLATFORM();
2022     }
2023
2024     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2025     {
2026         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
2027     }
2028
2029     static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2030     {
2031         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
2032     }
2033
2034 protected:
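         // Jumps are emitted as a full-width constant load into dataTempRegister plus bx so
         // they can be relinked later; when m_makeJumpPatchable is set they are additionally
         // tagged fixed-size so branch compaction leaves them untouched.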
2035     ALWAYS_INLINE Jump jump()
2036     {
2037         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
2038         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2039         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
2040     }
2041
2042     ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
2043     {
2044         m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
2045         m_assembler.it(cond, true, true);
2046         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
2047         return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
2048     }
2049     ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
2050     ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
2051     ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
2052
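         // The setupArmAddress() helpers fold an offset the addressing mode cannot encode
         // directly into addressTempRegister and return an ArmAddress that the load/store
         // emitters can consume as-is.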
2053     ArmAddress setupArmAddress(BaseIndex address)
2054     {
2055         if (address.offset) {
2056             ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2057             if (imm.isValid())
2058                 m_assembler.add(addressTempRegister, address.base, imm);
2059             else {
2060                 move(TrustedImm32(address.offset), addressTempRegister);
2061                 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2062             }
2063
2064             return ArmAddress(addressTempRegister, address.index, address.scale);
2065         } else
2066             return ArmAddress(address.base, address.index, address.scale);
2067     }
2068
2069     ArmAddress setupArmAddress(Address address)
2070     {
2071         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2072             return ArmAddress(address.base, address.offset);
2073
2074         move(TrustedImm32(address.offset), addressTempRegister);
2075         return ArmAddress(address.base, addressTempRegister);
2076     }
2077
2078     ArmAddress setupArmAddress(ImplicitAddress address)
2079     {
2080         if ((address.offset >= -0xff) && (address.offset <= 0xfff))
2081             return ArmAddress(address.base, address.offset);
2082
2083         move(TrustedImm32(address.offset), addressTempRegister);
2084         return ArmAddress(address.base, addressTempRegister);
2085     }
2086
2087     RegisterID makeBaseIndexBase(BaseIndex address)
2088     {
2089         if (!address.offset)
2090             return address.base;
2091
2092         ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
2093         if (imm.isValid())
2094             m_assembler.add(addressTempRegister, address.base, imm);
2095         else {
2096             move(TrustedImm32(address.offset), addressTempRegister);
2097             m_assembler.add(addressTempRegister, addressTempRegister, address.base);
2098         }
2099
2100         return addressTempRegister;
2101     }
2102
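         // Emits exactly two 4-byte instructions (movT3 for the low halfword, movt for the
         // high halfword) regardless of the immediate's value, giving patching code a fixed
         // layout to rewrite.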
2103     void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
2104     {
2105         uint32_t value = imm.m_value;
2106         m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
2107         m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
2108     }
2109
2110     ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
2111     {
2112         return static_cast<ARMv7Assembler::Condition>(cond);
2113     }
2114
2115     ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
2116     {
2117         return static_cast<ARMv7Assembler::Condition>(cond);
2118     }
2119
2120     ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
2121     {
2122         return static_cast<ARMv7Assembler::Condition>(cond);
2123     }
2124
2125 private:
2126     friend class LinkBuffer;
2127
2128     static void linkCall(void* code, Call call, FunctionPtr function)
2129     {
2130         if (call.isFlagSet(Call::Tail))
2131             ARMv7Assembler::linkJump(code, call.m_label, function.value());
2132         else
2133             ARMv7Assembler::linkCall(code, call.m_label, function.value());
2134     }
2135
2136     bool m_makeJumpPatchable;
2137 };
2138
2139 } // namespace JSC
2140
2141 #endif // ENABLE(ASSEMBLER)