/*
 * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "JSCJSValue.h"

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#else
#error "The MacroAssembler is not supported on this platform."
#endif

#include "MacroAssemblerHelpers.h"

namespace JSC {

namespace Printer {

struct PrintRecord;
typedef Vector<PrintRecord> PrintRecordList;

}

class MacroAssembler : public MacroAssemblerBase {
public:

    static constexpr RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static constexpr unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static constexpr unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static constexpr unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }
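
    // Taken together, these accessors flatten GPRs and FPRs into one index
    // space: GPRs map to [0 .. numberOfRegisters() - 1] and FPRs map to
    // [numberOfRegisters() .. totalNumberOfRegisters() - 1], e.g. for
    // indexing per-register tables.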

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::compare32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::moveDouble;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::mul32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(X86_64) || CPU(MIPS)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;

    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    static const double twoToThe32; // This is super useful for some double code.

    // Utilities used by the DFG JIT.
    using AbstractMacroAssemblerBase::invert;
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
        case Signed:
        case PositiveOrZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        case Signed:
            return PositiveOrZero;
        case PositiveOrZero:
            return Signed;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }

    static RelationalCondition flip(RelationalCondition cond)
    {
        switch (cond) {
        case Equal:
        case NotEqual:
            return cond;
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return Equal;
    }

    static bool isSigned(RelationalCondition cond)
    {
        return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
    }

    static bool isUnsigned(RelationalCondition cond)
    {
        return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
    }

    static bool isSigned(ResultCondition cond)
    {
        return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
    }

    static bool isUnsigned(ResultCondition cond)
    {
        return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
    }

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }

#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)

#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif

#if CPU(MIPS)
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }
#endif

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
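
    // For example, trustedImm32ForShift(Imm32(35)) yields TrustedImm32(3):
    // masking with 31 mirrors the 5-bit shift-amount encoding, so an
    // untrusted count can never shift by more than 31.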

    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
    {
        return branch32(flip(cond), right, left);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
    {
        compare32(commute(cond), right, left, dest);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }

#if !CPU(ARM_TRADITIONAL)
    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif
#endif

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
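
    // For example, branch32(GreaterThan, left, right) is equivalent to
    // branch32(commute(GreaterThan), right, left), i.e. to
    // branch32(LessThan, right, left); this is what lets the
    // immediate-on-the-left overloads above forward to the
    // register-on-the-left forms.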

    void oops()
    {
        abortWithReason(B3Oops);
    }

    // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
    // consumes some register in some way.
    void retVoid() { ret(); }
    void ret32(RegisterID) { ret(); }
    void ret64(RegisterID) { ret(); }
    void retFloat(FPRegisterID) { ret(); }
    void retDouble(FPRegisterID) { ret(); }

    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
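
    // With BlindingModulus == 64 this is true for roughly 1 in 64 calls
    // (whenever the low 6 bits of random() are all zero), so only a small
    // fraction of otherwise-eligible constants pay the cost of blinding.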

    void move(Address src, Address dest, RegisterID scratch)
    {
        loadPtr(src, scratch);
        storePtr(scratch, dest);
    }

    void move32(Address src, Address dest, RegisterID scratch)
    {
        load32(src, scratch);
        store32(scratch, dest);
    }

    void moveFloat(Address src, Address dest, FPRegisterID scratch)
    {
        loadFloat(src, scratch);
        storeFloat(scratch, dest);
    }

    void moveDouble(Address src, Address dest, FPRegisterID scratch)
    {
        loadDouble(src, scratch);
        storeDouble(scratch, dest);
    }

    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add32(left, right, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift32(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift32(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void xorPtr(Address src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

#if ENABLE(FAST_TLS_JIT)
    void loadFromTLSPtr(uint32_t offset, RegisterID dst)
    {
        loadFromTLS32(offset, dst);
    }

    void storeToTLSPtr(RegisterID src, uint32_t offset)
    {
        storeToTLS32(src, offset);
    }
#endif

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // !CPU(X86_64) && !CPU(ARM64)

    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
    {
        add64(left, right, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift64(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift64(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(Address src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

#if ENABLE(FAST_TLS_JIT)
    void loadFromTLSPtr(uint32_t offset, RegisterID dst)
    {
        loadFromTLS64(offset, dst);
    }
    void storeToTLSPtr(RegisterID src, uint32_t offset)
    {
        storeToTLS64(src, offset);
    }
#endif

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }

    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
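
    // In effect, finite doubles whose magnitude is at most 0xff and whose
    // fractional part is a multiple of 1/8 (e.g. 2.5 or -0.125) are treated
    // as safe to embed directly; anything else is at least a candidate for
    // blinding.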

    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }

    bool shouldBlind(ImmPtr imm)
    {
        if (!canBlind())
            return false;

#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(value);
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
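
    // The blinded constant is the original pointer rotated left by a random
    // amount; rotating right by the same amount at runtime recovers it. For
    // example, on a 64-bit target an illustrative pointer 0x7fff0000 with
    // rotation 8 is materialized as 0x7fff000000 and then rotated back into
    // place, so the raw pointer bits never appear verbatim in the
    // instruction stream.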

    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

#if CPU(X86_64) || CPU(ARM64)
    void moveDouble(Imm64 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister());
        move64ToDouble(scratchRegister(), dest);
    }
#endif

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }
#endif // !CPU(X86_64) && !CPU(ARM64)

#if ENABLE(B3_JIT)
    // We should implement this the right way eventually, but for now, it's fine because it arises so
    // infrequently.
    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        move(TrustedImm32(0), dest);
        Jump falseCase = branchDouble(invert(cond), left, right);
        move(TrustedImm32(1), dest);
        falseCase.link(this);
    }
    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
    {
        move(TrustedImm32(0), dest);
        Jump falseCase = branchFloat(invert(cond), left, right);
        move(TrustedImm32(1), dest);
        falseCase.link(this);
    }
#endif

    void lea32(Address address, RegisterID dest)
    {
        add32(TrustedImm32(address.offset), address.base, dest);
    }

#if CPU(X86_64) || CPU(ARM64)
    void lea64(Address address, RegisterID dest)
    {
        add64(TrustedImm32(address.offset), address.base, dest);
    }
#endif // CPU(X86_64) || CPU(ARM64)

    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
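
    // The two halves XOR back to the original: for an illustrative baseValue
    // 0x1234 and key 0x0f0f, value1 is 0x1d3b and 0x1d3b ^ 0x0f0f == 0x1234.
    // Neither half on its own equals the attacker-supplied constant.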

    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
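
    // The two halves add back to the original: value1 + value2 ==
    // (baseValue - key) + key == baseValue. The maskTable lookup keeps the
    // key a multiple of 4 (or 2) when baseValue is, so both halves preserve
    // any pointer alignment implied by baseValue's low bits.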

    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }
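
    // ANDing the two halves recovers the original within the mask:
    // ((b & k) | ~k) & ((b & ~k) | k) == b for any key k, since each half
    // passes through exactly the bits the other half forces to 1.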

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
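
    // Likewise, ORing the two halves recovers the original:
    // (b & k) | (b & ~k) == b, with the key deciding which bits of b each
    // half contributes.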

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
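
    // For example, materializing 0x1234 with an illustrative key 0x0f0f emits
    //     move(TrustedImm32(0x1d3b), dest);
    //     xor32(TrustedImm32(0x0f0f), dest);
    // so the constant 0x1234 never appears literally in the emitted code.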

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, src, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), src, dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src != dest || haveScratchRegisterForBlinding()) {
                if (src == dest) {
                    move(src, scratchRegisterForBlinding());
                    src = scratchRegisterForBlinding();
                }
                loadXorBlindedConstant(xorBlindConstant(imm), dest);
                mul32(src, dest);
                return;
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
        }
        mul32(imm.asTrustedImm32(), src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)
1642     
1643     void store32(Imm32 imm, Address dest)
1644     {
1645         if (shouldBlind(imm)) {
1646 #if CPU(X86) || CPU(X86_64)
1647             BlindedImm32 blind = xorBlindConstant(imm);
1648             store32(blind.value1, dest);
1649             xor32(blind.value2, dest);
1650 #else // CPU(X86) || CPU(X86_64)
1651             if (haveScratchRegisterForBlinding()) {
1652                 loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
1653                 store32(scratchRegisterForBlinding(), dest);
1654             } else {
1655                 // If we don't have a scratch register available for use, we'll just 
1656                 // place a random number of nops.
1657                 uint32_t nopCount = random() & 3;
1658                 while (nopCount--)
1659                     nop();
1660                 store32(imm.asTrustedImm32(), dest);
1661             }
1662 #endif // CPU(X86) || CPU(X86_64)
1663         } else
1664             store32(imm.asTrustedImm32(), dest);
1665     }
1666     
1667     void sub32(Imm32 imm, RegisterID dest)
1668     {
1669         if (shouldBlind(imm)) {
1670             BlindedImm32 key = additionBlindedConstant(imm);
1671             sub32(key.value1, dest);
1672             sub32(key.value2, dest);
1673         } else
1674             sub32(imm.asTrustedImm32(), dest);
1675     }
1676     
1677     void subPtr(Imm32 imm, RegisterID dest)
1678     {
1679         if (shouldBlind(imm)) {
1680             BlindedImm32 key = additionBlindedConstant(imm);
1681             subPtr(key.value1, dest);
1682             subPtr(key.value2, dest);
1683         } else
1684             subPtr(imm.asTrustedImm32(), dest);
1685     }
1686     
1687     void xor32(Imm32 imm, RegisterID src, RegisterID dest)
1688     {
1689         if (shouldBlind(imm)) {
1690             BlindedImm32 blind = xorBlindConstant(imm);
1691             xor32(blind.value1, src, dest);
1692             xor32(blind.value2, dest);
1693         } else
1694             xor32(imm.asTrustedImm32(), src, dest);
1695     }
1696     
1697     void xor32(Imm32 imm, RegisterID dest)
1698     {
1699         if (shouldBlind(imm)) {
1700             BlindedImm32 blind = xorBlindConstant(imm);
1701             xor32(blind.value1, dest);
1702             xor32(blind.value2, dest);
1703         } else
1704             xor32(imm.asTrustedImm32(), dest);
1705     }
1706
1707     Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
1708     {
1709         if (shouldBlind(right)) {
1710             if (haveScratchRegisterForBlinding()) {
1711                 loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
1712                 return branch32(cond, left, scratchRegisterForBlinding());
1713             }
1714             // If we don't have a scratch register available for use, we'll just 
1715             // place a random number of nops.
1716             uint32_t nopCount = random() & 3;
1717             while (nopCount--)
1718                 nop();
1719             return branch32(cond, left, right.asTrustedImm32());
1720         }
1721         
1722         return branch32(cond, left, right.asTrustedImm32());
1723     }
1724
1725     void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
1726     {
1727         if (shouldBlind(right)) {
1728             if (left != dest || haveScratchRegisterForBlinding()) {
1729                 RegisterID blindedConstantReg = dest;
1730                 if (left == dest)
1731                     blindedConstantReg = scratchRegisterForBlinding();
1732                 loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
1733                 compare32(cond, left, blindedConstantReg, dest);
1734                 return;
1735             }
1736             // If we don't have a scratch register available for use, we'll just
1737             // place a random number of nops.
1738             uint32_t nopCount = random() & 3;
1739             while (nopCount--)
1740                 nop();
1741             compare32(cond, left, right.asTrustedImm32(), dest);
1742             return;
1743         }
1744
1745         compare32(cond, left, right.asTrustedImm32(), dest);
1746     }
1747
1748     Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
1749     {
1750         if (shouldBlind(imm)) {
1751             if (src != dest || haveScratchRegisterForBlinding()) {
1752                 if (src == dest) {
1753                     move(src, scratchRegisterForBlinding());
1754                     src = scratchRegisterForBlinding();
1755                 }
1756                 loadXorBlindedConstant(xorBlindConstant(imm), dest);
1757                 return branchAdd32(cond, src, dest);
1758             }
1759             // If we don't have a scratch register available for use, we'll just
1760             // place a random number of nops.
1761             uint32_t nopCount = random() & 3;
1762             while (nopCount--)
1763                 nop();
1764         }
1765         return branchAdd32(cond, src, imm.asTrustedImm32(), dest);            
1766     }
1767     
1768     Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
1769     {
1770         if (src == dest)
1771             ASSERT(haveScratchRegisterForBlinding());
1772
1773         if (shouldBlind(imm)) {
1774             if (src == dest) {
1775                 move(src, scratchRegisterForBlinding());
1776                 src = scratchRegisterForBlinding();
1777             }
1778             loadXorBlindedConstant(xorBlindConstant(imm), dest);
1779             return branchMul32(cond, src, dest);  
1780         }
1781         return branchMul32(cond, src, imm.asTrustedImm32(), dest);
1782     }
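
    // In branchAdd32() and branchMul32() above, the blinded constant is built
    // directly in dest, so when src == dest the original value of src must
    // first be saved in the scratch register (branchMul32() asserts that one
    // is available in that case).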
1783
1784     // branchSub32 takes a scratch register because 32-bit platforms may call it
1785     // with src == dest, and on x86-32 there is no platform scratch register.
1786     Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
1787     {
1788         if (shouldBlind(imm)) {
1789             ASSERT(scratch != dest);
1790             ASSERT(scratch != src);
1791             loadXorBlindedConstant(xorBlindConstant(imm), scratch);
1792             return branchSub32(cond, src, scratch, dest);
1793         }
1794         return branchSub32(cond, src, imm.asTrustedImm32(), dest);            
1795     }
1796     
1797     void lshift32(Imm32 imm, RegisterID dest)
1798     {
1799         lshift32(trustedImm32ForShift(imm), dest);
1800     }
1801     
1802     void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
1803     {
1804         lshift32(src, trustedImm32ForShift(amount), dest);
1805     }
1806     
1807     void rshift32(Imm32 imm, RegisterID dest)
1808     {
1809         rshift32(trustedImm32ForShift(imm), dest);
1810     }
1811     
1812     void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
1813     {
1814         rshift32(src, trustedImm32ForShift(amount), dest);
1815     }
1816     
1817     void urshift32(Imm32 imm, RegisterID dest)
1818     {
1819         urshift32(trustedImm32ForShift(imm), dest);
1820     }
1821     
1822     void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
1823     {
1824         urshift32(src, trustedImm32ForShift(amount), dest);
1825     }
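
    // Shift amounts are not blinded: only the low five bits of a 32-bit shift
    // amount are significant, so the space of possible constants is too small
    // to be useful to an attacker. trustedImm32ForShift() (defined earlier in
    // this class) is assumed to reduce the amount to that range.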
1826
1827     struct CPUState;
1828
1829     // This function emits code to preserve the CPUState (e.g. registers),
1830     // call a user-supplied probe function, and restore the CPUState before
1831     // continuing with other JIT-generated code.
1832     //
1833     // The user-supplied probe function will be called with a single pointer to
1834     // a ProbeContext struct (defined below) which contains, among other things,
1835     // the preserved CPUState. This allows the user probe function to inspect
1836     // the CPUState at that point in the JIT-generated code.
1837     //
1838     // If the user probe function alters the register values in the ProbeContext,
1839     // the altered values will be loaded into the CPU registers when the probe
1840     // returns.
1841     //
1842     // The ProbeContext is stack-allocated and is only valid for the duration
1843     // of the call to the user probe function.
1844     //
1845     // Note: this version of probe() should be implemented by the target-specific
1846     // MacroAssembler.
1847     void probe(ProbeFunction, void* arg);
1848
1849     JS_EXPORT_PRIVATE void probe(std::function<void(ProbeContext*)>);
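
    // A hypothetical usage sketch of the std::function form above, assuming a
    // MacroAssembler instance named 'jit' (dataLogLn and RawPointer are WTF
    // printing utilities):
    //
    //     jit.probe([] (ProbeContext* context) {
    //         dataLogLn("sp = ", RawPointer(context->sp()));
    //     });
    //
    // Values written through context->cpu inside the lambda are reloaded into
    // the CPU registers when the probe returns, as described above.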
1850
1851     // Lets you print from your JIT-generated code.
1852     // See comments in MacroAssemblerPrinter.h for examples of how to use this.
1853     template<typename... Arguments>
1854     void print(Arguments&&... args);
1855
1856     void print(Printer::PrintRecordList*);
1857 };
1858
1859 struct MacroAssembler::CPUState {
1860     static inline const char* gprName(RegisterID id) { return MacroAssembler::gprName(id); }
1861     static inline const char* sprName(SPRegisterID id) { return MacroAssembler::sprName(id); }
1862     static inline const char* fprName(FPRegisterID id) { return MacroAssembler::fprName(id); }
1863     inline uintptr_t& gpr(RegisterID);
1864     inline uintptr_t& spr(SPRegisterID);
1865     inline double& fpr(FPRegisterID);
1866
1867     template<typename T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
1868     T gpr(RegisterID) const;
1869     template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type* = nullptr>
1870     T gpr(RegisterID) const;
1871     template<typename T> T fpr(FPRegisterID) const;
1872
1873     void*& pc();
1874     void*& fp();
1875     void*& sp();
1876     template<typename T> T pc() const;
1877     template<typename T> T fp() const;
1878     template<typename T> T sp() const;
1879
1880     uintptr_t gprs[MacroAssembler::numberOfRegisters()];
1881     uintptr_t sprs[MacroAssembler::numberOfSPRegisters()];
1882     double fprs[MacroAssembler::numberOfFPRegisters()];
1883 };
1884
1885 inline uintptr_t& MacroAssembler::CPUState::gpr(RegisterID id)
1886 {
1887     ASSERT(id >= MacroAssembler::firstRegister() && id <= MacroAssembler::lastRegister());
1888     return gprs[id];
1889 }
1890
1891 inline uintptr_t& MacroAssembler::CPUState::spr(SPRegisterID id)
1892 {
1893     ASSERT(id >= MacroAssembler::firstSPRegister() && id <= MacroAssembler::lastSPRegister());
1894     return sprs[id];
1895 }
1896
1897 inline double& MacroAssembler::CPUState::fpr(FPRegisterID id)
1898 {
1899     ASSERT(id >= MacroAssembler::firstFPRegister() && id <= MacroAssembler::lastFPRegister());
1900     return fprs[id];
1901 }
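
// Note: the accessors above index the flat arrays directly by register ID,
// which relies on the first register of each bank being numbered 0 on all
// supported targets; the ASSERTs only check that the ID is within range.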
1902
1903 template<typename T, typename std::enable_if<std::is_integral<T>::value>::type*>
1904 T MacroAssembler::CPUState::gpr(RegisterID id) const
1905 {
1906     CPUState* cpu = const_cast<CPUState*>(this);
1907     return static_cast<T>(cpu->gpr(id));
1908 }
1909
1910 template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type*>
1911 T MacroAssembler::CPUState::gpr(RegisterID id) const
1912 {
1913     CPUState* cpu = const_cast<CPUState*>(this);
1914     return reinterpret_cast<T>(cpu->gpr(id));
1915 }
1916
1917 template<typename T>
1918 T MacroAssembler::CPUState::fpr(FPRegisterID id) const
1919 {
1920     CPUState* cpu = const_cast<CPUState*>(this);
1921     return bitwise_cast<T>(cpu->fpr(id));
1922 }
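
// The three const overloads above reuse the non-const accessors via const_cast
// and return by value (static_cast for integral reads, reinterpret_cast for
// pointer reads, and bitwise_cast to reinterpret a double's bits), so callers
// observe no mutation.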
1923
1924 inline void*& MacroAssembler::CPUState::pc()
1925 {
1926 #if CPU(X86) || CPU(X86_64)
1927     return *reinterpret_cast<void**>(&spr(X86Registers::eip));
1928 #elif CPU(ARM64)
1929     return *reinterpret_cast<void**>(&spr(ARM64Registers::pc));
1930 #elif CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)
1931     return *reinterpret_cast<void**>(&gpr(ARMRegisters::pc));
1932 #elif CPU(MIPS)
1933     RELEASE_ASSERT_NOT_REACHED(); // pc is not a directly addressable register on MIPS.
1934 #else
1935 #error "Unsupported CPU"
1936 #endif
1937 }
1938
1939 inline void*& MacroAssembler::CPUState::fp()
1940 {
1941 #if CPU(X86) || CPU(X86_64)
1942     return *reinterpret_cast<void**>(&gpr(X86Registers::ebp));
1943 #elif CPU(ARM64)
1944     return *reinterpret_cast<void**>(&gpr(ARM64Registers::fp));
1945 #elif CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)
1946     return *reinterpret_cast<void**>(&gpr(ARMRegisters::fp));
1947 #elif CPU(MIPS)
1948     return *reinterpret_cast<void**>(&gpr(MIPSRegisters::fp));
1949 #else
1950 #error "Unsupported CPU"
1951 #endif
1952 }
1953
1954 inline void*& MacroAssembler::CPUState::sp()
1955 {
1956 #if CPU(X86) || CPU(X86_64)
1957     return *reinterpret_cast<void**>(&gpr(X86Registers::esp));
1958 #elif CPU(ARM64)
1959     return *reinterpret_cast<void**>(&gpr(ARM64Registers::sp));
1960 #elif CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)
1961     return *reinterpret_cast<void**>(&gpr(ARMRegisters::sp));
1962 #elif CPU(MIPS)
1963     return *reinterpret_cast<void**>(&gpr(MIPSRegisters::sp));
1964 #else
1965 #error "Unsupported CPU"
1966 #endif
1967 }
1968
1969 template<typename T>
1970 T MacroAssembler::CPUState::pc() const
1971 {
1972     CPUState* cpu = const_cast<CPUState*>(this);
1973     return reinterpret_cast<T>(cpu->pc());
1974 }
1975
1976 template<typename T>
1977 T MacroAssembler::CPUState::fp() const
1978 {
1979     CPUState* cpu = const_cast<CPUState*>(this);
1980     return reinterpret_cast<T>(cpu->fp());
1981 }
1982
1983 template<typename T>
1984 T MacroAssembler::CPUState::sp() const
1985 {
1986     CPUState* cpu = const_cast<CPUState*>(this);
1987     return reinterpret_cast<T>(cpu->sp());
1988 }
1989
1990 struct ProbeContext {
1991     using CPUState = MacroAssembler::CPUState;
1992     using RegisterID = MacroAssembler::RegisterID;
1993     using SPRegisterID = MacroAssembler::SPRegisterID;
1994     using FPRegisterID = MacroAssembler::FPRegisterID;
1995
1996     ProbeFunction probeFunction;
1997     void* arg;
1998     CPUState cpu;
1999
2000     // Convenience methods:
2001     uintptr_t& gpr(RegisterID id) { return cpu.gpr(id); }
2002     uintptr_t& spr(SPRegisterID id) { return cpu.spr(id); }
2003     double& fpr(FPRegisterID id) { return cpu.fpr(id); }
2004     const char* gprName(RegisterID id) { return cpu.gprName(id); }
2005     const char* sprName(SPRegisterID id) { return cpu.sprName(id); }
2006     const char* fprName(FPRegisterID id) { return cpu.fprName(id); }
2007
2008     void*& pc() { return cpu.pc(); }
2009     void*& fp() { return cpu.fp(); }
2010     void*& sp() { return cpu.sp(); }
2011
2012     template<typename T> T pc() { return cpu.pc<T>(); }
2013     template<typename T> T fp() { return cpu.fp<T>(); }
2014     template<typename T> T sp() { return cpu.sp<T>(); }
2015 };
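
// A hypothetical sketch (not part of the original sources) of a probe function
// exercising the typed accessors above; dataLogLn and RawPointer are WTF
// printing utilities, and pc() is unavailable on MIPS as noted earlier:
//
//     static void dumpSomeState(ProbeContext* context)
//     {
//         void* resumePC = context->pc<void*>();
//         uintptr_t gpr0 = context->gpr(MacroAssembler::firstRegister());
//         double fpr0 = context->cpu.fpr<double>(MacroAssembler::firstFPRegister());
//         dataLogLn("pc = ", RawPointer(resumePC), " gpr0 = ", gpr0, " fpr0 = ", fpr0);
//     }
//
// Such a function matches the ProbeFunction signature and could be passed to
// probe(ProbeFunction, void*) along with an argument pointer.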
2016     
2017 } // namespace JSC
2018
2019 namespace WTF {
2020
2021 class PrintStream;
2022
2023 void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
2024 void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
2025 void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);
2026
2027 } // namespace WTF
2028
2029 #else // ENABLE(ASSEMBLER)
2030
2031 namespace JSC {
2032
2033 // If there is no assembler for this platform, at least allow code to make references to
2034 // some of the things it would otherwise define, albeit without giving that code any way
2035 // of doing anything useful.
2036 class MacroAssembler {
2037 private:
2038     MacroAssembler() { }
2039     
2040 public:
2041     
2042     enum RegisterID { NoRegister };
2043     enum FPRegisterID { NoFPRegister };
2044 };
2045
2046 } // namespace JSC
2047
2048 #endif // ENABLE(ASSEMBLER)