JSC should infer property types
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSArrowFunction.h"
42 #include "JSCInlines.h"
43 #include "JSEnvironmentRecord.h"
44 #include "JSLexicalEnvironment.h"
45 #include "LinkBuffer.h"
46 #include "ScopedArguments.h"
47 #include "ScratchRegisterAllocator.h"
48 #include "WriteBarrierBuffer.h"
49 #include <wtf/MathExtras.h>
50
51 namespace JSC { namespace DFG {
52
53 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
54     : m_compileOkay(true)
55     , m_jit(jit)
56     , m_currentNode(0)
57     , m_lastGeneratedNode(LastNodeType)
58     , m_indexInBlock(0)
59     , m_generationInfo(m_jit.graph().frameRegisterCount())
60     , m_state(m_jit.graph())
61     , m_interpreter(m_jit.graph(), m_state)
62     , m_stream(&jit.jitCode()->variableEventStream)
63     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
64 {
65 }
66
67 SpeculativeJIT::~SpeculativeJIT()
68 {
69 }
70
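// Annotation (not in the original source): emitAllocateJSArray is the fast path for
// allocating a JSArray together with its butterfly. The butterfly is obtained from
// emitAllocateBasicStorage, the public length and vector length are initialized, and for
// double arrays any slots past numElements are filled with PNaN so the backing store never
// holds garbage doubles. Anything that cannot be done inline is appended to the slow path,
// which falls back to operationNewArrayWithSize.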
71 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
72 {
73     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
74     
75     GPRTemporary scratch(this);
76     GPRTemporary scratch2(this);
77     GPRReg scratchGPR = scratch.gpr();
78     GPRReg scratch2GPR = scratch2.gpr();
79     
80     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
81     
82     JITCompiler::JumpList slowCases;
83     
84     slowCases.append(
85         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
86     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
87     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
88     
89     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
90     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
91     
92     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
93 #if USE(JSVALUE64)
94         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
95         for (unsigned i = numElements; i < vectorLength; ++i)
96             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
97 #else
98         EncodedValueDescriptor value;
99         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
100         for (unsigned i = numElements; i < vectorLength; ++i) {
101             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
102             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
103         }
104 #endif
105     }
106     
107     // I want a slow path that also loads out the storage pointer, and that's
108     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
109     // of work for a very small piece of functionality. :-/
110     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
111         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
112         structure, numElements));
113 }
114
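// Annotation: emitGetLength materializes the argument count for the given call frame. For
// an inlined call that is not a varargs call the count is a compile-time constant; otherwise
// it is loaded from the frame's ArgumentCount slot. When includeThis is false the count
// excludes the |this| argument.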
115 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
116 {
117     if (inlineCallFrame && !inlineCallFrame->isVarargs())
118         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
119     else {
120         VirtualRegister argumentCountRegister;
121         if (!inlineCallFrame)
122             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
123         else
124             argumentCountRegister = inlineCallFrame->argumentCountRegister;
125         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
126         if (!includeThis)
127             m_jit.sub32(TrustedImm32(1), lengthGPR);
128     }
129 }
130
131 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
132 {
133     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
134 }
135
136 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
137 {
138     if (origin.inlineCallFrame) {
139         if (origin.inlineCallFrame->isClosureCall) {
140             m_jit.loadPtr(
141                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
142                 calleeGPR);
143         } else {
144             m_jit.move(
145                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
146                 calleeGPR);
147         }
148     } else
149         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
150 }
151
152 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
153 {
154     m_jit.addPtr(
155         TrustedImm32(
156             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
157         GPRInfo::callFrameRegister, startGPR);
158 }
159
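// Annotation: OSR exit fuzzing support. This bumps the global g_numberOfOSRExitFuzzChecks
// counter and, depending on Options::fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter, returns a
// jump that is taken once the configured check number is reached. speculationCheck() folds
// that jump into the exit's jump list so that exits can be forced for testing.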
160 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
161 {
162     if (!doOSRExitFuzzing())
163         return MacroAssembler::Jump();
164     
165     MacroAssembler::Jump result;
166     
167     m_jit.pushToSave(GPRInfo::regT0);
168     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
169     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
170     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
171     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
172     unsigned at = Options::fireOSRExitFuzzAt();
173     if (at || atOrAfter) {
174         unsigned threshold;
175         MacroAssembler::RelationalCondition condition;
176         if (atOrAfter) {
177             threshold = atOrAfter;
178             condition = MacroAssembler::Below;
179         } else {
180             threshold = at;
181             condition = MacroAssembler::NotEqual;
182         }
183         MacroAssembler::Jump ok = m_jit.branch32(
184             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
185         m_jit.popToRestore(GPRInfo::regT0);
186         result = m_jit.jump();
187         ok.link(&m_jit);
188     }
189     m_jit.popToRestore(GPRInfo::regT0);
190     
191     return result;
192 }
193
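// Annotation: the speculationCheck() family records an OSR exit for the current node. The
// failing jump(s) are registered as exit info on the JITCompiler, and an OSRExit entry is
// appended to the JIT code capturing the exit kind, the value source, the value profile for
// the node, and the current position in the variable event stream.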
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
195 {
196     if (!m_compileOkay)
197         return;
198     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
199     if (fuzzJump.isSet()) {
200         JITCompiler::JumpList jumpsToFail;
201         jumpsToFail.append(fuzzJump);
202         jumpsToFail.append(jumpToFail);
203         m_jit.appendExitInfo(jumpsToFail);
204     } else
205         m_jit.appendExitInfo(jumpToFail);
206     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
207 }
208
209 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
210 {
211     if (!m_compileOkay)
212         return;
213     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
214     if (fuzzJump.isSet()) {
215         JITCompiler::JumpList myJumpsToFail;
216         myJumpsToFail.append(jumpsToFail);
217         myJumpsToFail.append(fuzzJump);
218         m_jit.appendExitInfo(myJumpsToFail);
219     } else
220         m_jit.appendExitInfo(jumpsToFail);
221     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
222 }
223
224 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
225 {
226     if (!m_compileOkay)
227         return OSRExitJumpPlaceholder();
228     unsigned index = m_jit.jitCode()->osrExit.size();
229     m_jit.appendExitInfo();
230     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
231     return OSRExitJumpPlaceholder(index);
232 }
233
234 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
235 {
236     return speculationCheck(kind, jsValueSource, nodeUse.node());
237 }
238
239 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
240 {
241     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
245 {
246     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
247 }
248
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
250 {
251     if (!m_compileOkay)
252         return;
253     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
254     m_jit.appendExitInfo(jumpToFail);
255     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
256 }
257
258 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
259 {
260     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
261 }
262
263 void SpeculativeJIT::emitInvalidationPoint(Node* node)
264 {
265     if (!m_compileOkay)
266         return;
267     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
268     m_jit.jitCode()->appendOSRExit(OSRExit(
269         UncountableInvalidation, JSValueSource(),
270         m_jit.graph().methodOfGettingAValueProfileFor(node),
271         this, m_stream->size()));
272     info.m_replacementSource = m_jit.watchpointLabel();
273     ASSERT(info.m_replacementSource.isSet());
274     noResult(node);
275 }
276
277 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
278 {
279     if (!m_compileOkay)
280         return;
281     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
282     m_compileOkay = false;
283     if (verboseCompilationEnabled())
284         dataLog("Bailing compilation.\n");
285 }
286
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
288 {
289     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
290 }
291
292 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
293 {
294     ASSERT(needsTypeCheck(edge, typesPassedThrough));
295     m_interpreter.filter(edge, typesPassedThrough);
296     speculationCheck(BadType, source, edge.node(), jumpToFail);
297 }
298
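// Annotation: usedRegisters() computes the set of registers currently holding live values,
// merged with the registers that stub code may not use. Callers such as compileIn() store it
// in StructureStubInfo::patch.usedRegisters so the inline cache knows what to preserve.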
299 RegisterSet SpeculativeJIT::usedRegisters()
300 {
301     RegisterSet result;
302     
303     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
304         GPRReg gpr = GPRInfo::toRegister(i);
305         if (m_gprs.isInUse(gpr))
306             result.set(gpr);
307     }
308     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
309         FPRReg fpr = FPRInfo::toRegister(i);
310         if (m_fprs.isInUse(fpr))
311             result.set(fpr);
312     }
313     
314     result.merge(RegisterSet::stubUnavailableRegisters());
315     
316     return result;
317 }
318
319 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
320 {
321     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
322 }
323
324 void SpeculativeJIT::runSlowPathGenerators()
325 {
326     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
327         m_slowPathGenerators[i]->generate(this);
328 }
329
330 // On Windows we need to wrap fmod; on other platforms we can call it directly.

331 // On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
332 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
333 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
334 {
335     return fmod(x, y);
336 }
337 #else
338 #define fmodAsDFGOperation fmod
339 #endif
340
341 void SpeculativeJIT::clearGenerationInfo()
342 {
343     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
344         m_generationInfo[i] = GenerationInfo();
345     m_gprs = RegisterBank<GPRInfo>();
346     m_fprs = RegisterBank<FPRInfo>();
347 }
348
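// Annotation: silent spill/fill plans describe how to save a value that lives in a register
// across an operation (typically a call) and how to restore it afterwards, without disturbing
// the recorded register allocation. The chosen actions depend on the value's DataFormat, on
// whether it has already been spilled, and on whether the node is a constant that can simply
// be rematerialized instead of reloaded.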
349 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
350 {
351     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
352     Node* node = info.node();
353     DataFormat registerFormat = info.registerFormat();
354     ASSERT(registerFormat != DataFormatNone);
355     ASSERT(registerFormat != DataFormatDouble);
356         
357     SilentSpillAction spillAction;
358     SilentFillAction fillAction;
359         
360     if (!info.needsSpill())
361         spillAction = DoNothingForSpill;
362     else {
363 #if USE(JSVALUE64)
364         ASSERT(info.gpr() == source);
365         if (registerFormat == DataFormatInt32)
366             spillAction = Store32Payload;
367         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
368             spillAction = StorePtr;
369         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
370             spillAction = Store64;
371         else {
372             ASSERT(registerFormat & DataFormatJS);
373             spillAction = Store64;
374         }
375 #elif USE(JSVALUE32_64)
376         if (registerFormat & DataFormatJS) {
377             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
378             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
379         } else {
380             ASSERT(info.gpr() == source);
381             spillAction = Store32Payload;
382         }
383 #endif
384     }
385         
386     if (registerFormat == DataFormatInt32) {
387         ASSERT(info.gpr() == source);
388         ASSERT(isJSInt32(info.registerFormat()));
389         if (node->hasConstant()) {
390             ASSERT(node->isInt32Constant());
391             fillAction = SetInt32Constant;
392         } else
393             fillAction = Load32Payload;
394     } else if (registerFormat == DataFormatBoolean) {
395 #if USE(JSVALUE64)
396         RELEASE_ASSERT_NOT_REACHED();
397 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
398         fillAction = DoNothingForFill;
399 #endif
400 #elif USE(JSVALUE32_64)
401         ASSERT(info.gpr() == source);
402         if (node->hasConstant()) {
403             ASSERT(node->isBooleanConstant());
404             fillAction = SetBooleanConstant;
405         } else
406             fillAction = Load32Payload;
407 #endif
408     } else if (registerFormat == DataFormatCell) {
409         ASSERT(info.gpr() == source);
410         if (node->hasConstant()) {
411             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
412             node->asCell(); // To get the assertion.
413             fillAction = SetCellConstant;
414         } else {
415 #if USE(JSVALUE64)
416             fillAction = LoadPtr;
417 #else
418             fillAction = Load32Payload;
419 #endif
420         }
421     } else if (registerFormat == DataFormatStorage) {
422         ASSERT(info.gpr() == source);
423         fillAction = LoadPtr;
424     } else if (registerFormat == DataFormatInt52) {
425         if (node->hasConstant())
426             fillAction = SetInt52Constant;
427         else if (info.spillFormat() == DataFormatInt52)
428             fillAction = Load64;
429         else if (info.spillFormat() == DataFormatStrictInt52)
430             fillAction = Load64ShiftInt52Left;
431         else if (info.spillFormat() == DataFormatNone)
432             fillAction = Load64;
433         else {
434             RELEASE_ASSERT_NOT_REACHED();
435 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
436             fillAction = Load64; // Make GCC happy.
437 #endif
438         }
439     } else if (registerFormat == DataFormatStrictInt52) {
440         if (node->hasConstant())
441             fillAction = SetStrictInt52Constant;
442         else if (info.spillFormat() == DataFormatInt52)
443             fillAction = Load64ShiftInt52Right;
444         else if (info.spillFormat() == DataFormatStrictInt52)
445             fillAction = Load64;
446         else if (info.spillFormat() == DataFormatNone)
447             fillAction = Load64;
448         else {
449             RELEASE_ASSERT_NOT_REACHED();
450 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
451             fillAction = Load64; // Make GCC happy.
452 #endif
453         }
454     } else {
455         ASSERT(registerFormat & DataFormatJS);
456 #if USE(JSVALUE64)
457         ASSERT(info.gpr() == source);
458         if (node->hasConstant()) {
459             if (node->isCellConstant())
460                 fillAction = SetTrustedJSConstant;
461             else
462                 fillAction = SetJSConstant;
463         } else if (info.spillFormat() == DataFormatInt32) {
464             ASSERT(registerFormat == DataFormatJSInt32);
465             fillAction = Load32PayloadBoxInt;
466         } else
467             fillAction = Load64;
468 #else
469         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
470         if (node->hasConstant())
471             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
472         else if (info.payloadGPR() == source)
473             fillAction = Load32Payload;
474         else { // Fill the Tag
475             switch (info.spillFormat()) {
476             case DataFormatInt32:
477                 ASSERT(registerFormat == DataFormatJSInt32);
478                 fillAction = SetInt32Tag;
479                 break;
480             case DataFormatCell:
481                 ASSERT(registerFormat == DataFormatJSCell);
482                 fillAction = SetCellTag;
483                 break;
484             case DataFormatBoolean:
485                 ASSERT(registerFormat == DataFormatJSBoolean);
486                 fillAction = SetBooleanTag;
487                 break;
488             default:
489                 fillAction = Load32Tag;
490                 break;
491             }
492         }
493 #endif
494     }
495         
496     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
497 }
498     
499 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
500 {
501     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
502     Node* node = info.node();
503     ASSERT(info.registerFormat() == DataFormatDouble);
504
505     SilentSpillAction spillAction;
506     SilentFillAction fillAction;
507         
508     if (!info.needsSpill())
509         spillAction = DoNothingForSpill;
510     else {
511         ASSERT(!node->hasConstant());
512         ASSERT(info.spillFormat() == DataFormatNone);
513         ASSERT(info.fpr() == source);
514         spillAction = StoreDouble;
515     }
516         
517 #if USE(JSVALUE64)
518     if (node->hasConstant()) {
519         node->asNumber(); // To get the assertion.
520         fillAction = SetDoubleConstant;
521     } else {
522         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
523         fillAction = LoadDouble;
524     }
525 #elif USE(JSVALUE32_64)
526     ASSERT(info.registerFormat() == DataFormatDouble);
527     if (node->hasConstant()) {
528         node->asNumber(); // To get the assertion.
529         fillAction = SetDoubleConstant;
530     } else
531         fillAction = LoadDouble;
532 #endif
533
534     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
535 }
536     
537 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
538 {
539     switch (plan.spillAction()) {
540     case DoNothingForSpill:
541         break;
542     case Store32Tag:
543         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
544         break;
545     case Store32Payload:
546         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
547         break;
548     case StorePtr:
549         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
550         break;
551 #if USE(JSVALUE64)
552     case Store64:
553         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
554         break;
555 #endif
556     case StoreDouble:
557         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
558         break;
559     default:
560         RELEASE_ASSERT_NOT_REACHED();
561     }
562 }
563     
564 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
565 {
566 #if USE(JSVALUE32_64)
567     UNUSED_PARAM(canTrample);
568 #endif
569     switch (plan.fillAction()) {
570     case DoNothingForFill:
571         break;
572     case SetInt32Constant:
573         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
574         break;
575 #if USE(JSVALUE64)
576     case SetInt52Constant:
577         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
578         break;
579     case SetStrictInt52Constant:
580         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
581         break;
582 #endif // USE(JSVALUE64)
583     case SetBooleanConstant:
584         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
585         break;
586     case SetCellConstant:
587         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
588         break;
589 #if USE(JSVALUE64)
590     case SetTrustedJSConstant:
591         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
592         break;
593     case SetJSConstant:
594         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
595         break;
596     case SetDoubleConstant:
597         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
598         m_jit.move64ToDouble(canTrample, plan.fpr());
599         break;
600     case Load32PayloadBoxInt:
601         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
602         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
603         break;
604     case Load32PayloadConvertToInt52:
605         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
606         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
607         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
608         break;
609     case Load32PayloadSignExtend:
610         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
611         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
612         break;
613 #else
614     case SetJSConstantTag:
615         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
616         break;
617     case SetJSConstantPayload:
618         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
619         break;
620     case SetInt32Tag:
621         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
622         break;
623     case SetCellTag:
624         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
625         break;
626     case SetBooleanTag:
627         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
628         break;
629     case SetDoubleConstant:
630         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
631         break;
632 #endif
633     case Load32Tag:
634         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
635         break;
636     case Load32Payload:
637         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
638         break;
639     case LoadPtr:
640         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
641         break;
642 #if USE(JSVALUE64)
643     case Load64:
644         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
645         break;
646     case Load64ShiftInt52Right:
647         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
648         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
649         break;
650     case Load64ShiftInt52Left:
651         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
652         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
653         break;
654 #endif
655     case LoadDouble:
656         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
657         break;
658     default:
659         RELEASE_ASSERT_NOT_REACHED();
660     }
661 }
662     
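// Annotation: given the cell's indexing type byte in tempGPR, this returns a jump that is
// taken when the indexing type does not match the ArrayMode's expected shape. For
// Array::Array the IsArray bit must additionally be set; for Array::NonArray and
// Array::OriginalNonArray it must be clear; for Array::PossiblyArray only the shape bits are
// compared.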
663 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
664 {
665     switch (arrayMode.arrayClass()) {
666     case Array::OriginalArray: {
667         CRASH();
668 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
669         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
670         return result;
671 #endif
672     }
673         
674     case Array::Array:
675         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
676         return m_jit.branch32(
677             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
678         
679     case Array::NonArray:
680     case Array::OriginalNonArray:
681         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
682         return m_jit.branch32(
683             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
684         
685     case Array::PossiblyArray:
686         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
687         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
688     }
689     
690     RELEASE_ASSERT_NOT_REACHED();
691     return JITCompiler::Jump();
692 }
693
694 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
695 {
696     JITCompiler::JumpList result;
697     
698     switch (arrayMode.type()) {
699     case Array::Int32:
700         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
701
702     case Array::Double:
703         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
704
705     case Array::Contiguous:
706         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
707
708     case Array::Undecided:
709         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
710
711     case Array::ArrayStorage:
712     case Array::SlowPutArrayStorage: {
713         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
714         
715         if (arrayMode.isJSArray()) {
716             if (arrayMode.isSlowPut()) {
717                 result.append(
718                     m_jit.branchTest32(
719                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
720                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
721                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
722                 result.append(
723                     m_jit.branch32(
724                         MacroAssembler::Above, tempGPR,
725                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
726                 break;
727             }
728             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
729             result.append(
730                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
731             break;
732         }
733         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
734         if (arrayMode.isSlowPut()) {
735             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
736             result.append(
737                 m_jit.branch32(
738                     MacroAssembler::Above, tempGPR,
739                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
740             break;
741         }
742         result.append(
743             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
744         break;
745     }
746     default:
747         CRASH();
748         break;
749     }
750     
751     return result;
752 }
753
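// Annotation: checkArray emits the check demanded by the node's ArrayMode. If the abstract
// state already proves the mode (alreadyChecked), no code is emitted. Indexing-shape modes
// check the indexing type byte via jumpSlowForUnwantedArrayMode; DirectArguments,
// ScopedArguments and typed-array modes check the cell's JSType instead.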
754 void SpeculativeJIT::checkArray(Node* node)
755 {
756     ASSERT(node->arrayMode().isSpecific());
757     ASSERT(!node->arrayMode().doesConversion());
758     
759     SpeculateCellOperand base(this, node->child1());
760     GPRReg baseReg = base.gpr();
761     
762     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
763         noResult(m_currentNode);
764         return;
765     }
766     
767     const ClassInfo* expectedClassInfo = 0;
768     
769     switch (node->arrayMode().type()) {
770     case Array::String:
771         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
772         break;
773     case Array::Int32:
774     case Array::Double:
775     case Array::Contiguous:
776     case Array::Undecided:
777     case Array::ArrayStorage:
778     case Array::SlowPutArrayStorage: {
779         GPRTemporary temp(this);
780         GPRReg tempGPR = temp.gpr();
781         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
782         speculationCheck(
783             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
784             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
785         
786         noResult(m_currentNode);
787         return;
788     }
789     case Array::DirectArguments:
790         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
791         noResult(m_currentNode);
792         return;
793     case Array::ScopedArguments:
794         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
795         noResult(m_currentNode);
796         return;
797     default:
798         speculateCellTypeWithoutTypeFiltering(
799             node->child1(), baseReg,
800             typeForTypedArrayType(node->arrayMode().typedArrayType()));
801         noResult(m_currentNode);
802         return;
803     }
804     
805     RELEASE_ASSERT(expectedClassInfo);
806     
807     GPRTemporary temp(this);
808     GPRTemporary temp2(this);
809     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
810     speculationCheck(
811         BadType, JSValueSource::unboxedCell(baseReg), node,
812         m_jit.branchPtr(
813             MacroAssembler::NotEqual,
814             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
815             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
816     
817     noResult(m_currentNode);
818 }
819
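// Annotation: Arrayify converts the base object's storage to the indexing type required by
// the ArrayMode. The inline code only checks whether the object is already in the desired
// state (by structure for ArrayifyToStructure, otherwise by indexing type); the actual
// conversion happens in ArrayifySlowPathGenerator.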
820 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
821 {
822     ASSERT(node->arrayMode().doesConversion());
823     
824     GPRTemporary temp(this);
825     GPRTemporary structure;
826     GPRReg tempGPR = temp.gpr();
827     GPRReg structureGPR = InvalidGPRReg;
828     
829     if (node->op() != ArrayifyToStructure) {
830         GPRTemporary realStructure(this);
831         structure.adopt(realStructure);
832         structureGPR = structure.gpr();
833     }
834         
835     // We can skip all that comes next if we already have array storage.
836     MacroAssembler::JumpList slowPath;
837     
838     if (node->op() == ArrayifyToStructure) {
839         slowPath.append(m_jit.branchWeakStructure(
840             JITCompiler::NotEqual,
841             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
842             node->structure()));
843     } else {
844         m_jit.load8(
845             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
846         
847         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
848     }
849     
850     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
851         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
852     
853     noResult(m_currentNode);
854 }
855
856 void SpeculativeJIT::arrayify(Node* node)
857 {
858     ASSERT(node->arrayMode().isSpecific());
859     
860     SpeculateCellOperand base(this, node->child1());
861     
862     if (!node->child2()) {
863         arrayify(node, base.gpr(), InvalidGPRReg);
864         return;
865     }
866     
867     SpeculateInt32Operand property(this, node->child2());
868     
869     arrayify(node, base.gpr(), property.gpr());
870 }
871
872 GPRReg SpeculativeJIT::fillStorage(Edge edge)
873 {
874     VirtualRegister virtualRegister = edge->virtualRegister();
875     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
876     
877     switch (info.registerFormat()) {
878     case DataFormatNone: {
879         if (info.spillFormat() == DataFormatStorage) {
880             GPRReg gpr = allocate();
881             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
882             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
883             info.fillStorage(*m_stream, gpr);
884             return gpr;
885         }
886         
887         // Must be a cell; fill it as a cell and then return the pointer.
888         return fillSpeculateCell(edge);
889     }
890         
891     case DataFormatStorage: {
892         GPRReg gpr = info.gpr();
893         m_gprs.lock(gpr);
894         return gpr;
895     }
896         
897     default:
898         return fillSpeculateCell(edge);
899     }
900 }
901
902 void SpeculativeJIT::useChildren(Node* node)
903 {
904     if (node->flags() & NodeHasVarArgs) {
905         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
906             if (!!m_jit.graph().m_varArgChildren[childIdx])
907                 use(m_jit.graph().m_varArgChildren[childIdx]);
908         }
909     } else {
910         Edge child1 = node->child1();
911         if (!child1) {
912             ASSERT(!node->child2() && !node->child3());
913             return;
914         }
915         use(child1);
916         
917         Edge child2 = node->child2();
918         if (!child2) {
919             ASSERT(!node->child3());
920             return;
921         }
922         use(child2);
923         
924         Edge child3 = node->child3();
925         if (!child3)
926             return;
927         use(child3);
928     }
929 }
930
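// Annotation: compileIn compiles the JavaScript 'in' operator, e.g. "foo" in base. When the
// property name is a constant atomic string we emit a patchable jump wired to
// operationInOptimize so that an inline cache can later be installed for this site;
// otherwise we flush registers and call operationGenericIn on the boxed key.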
931 void SpeculativeJIT::compileIn(Node* node)
932 {
933     SpeculateCellOperand base(this, node->child2());
934     GPRReg baseGPR = base.gpr();
935     
936     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
937         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
938             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
939             
940             GPRTemporary result(this);
941             GPRReg resultGPR = result.gpr();
942
943             use(node->child1());
944             
945             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
946             MacroAssembler::Label done = m_jit.label();
947             
948             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
949             // we can cast it to const AtomicStringImpl* safely.
950             auto slowPath = slowPathCall(
951                 jump.m_jump, this, operationInOptimize,
952                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
953                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
954             
955             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
956             stubInfo->codeOrigin = node->origin.semantic;
957             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
958             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
959 #if USE(JSVALUE32_64)
960             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
961 #endif
962             stubInfo->patch.usedRegisters = usedRegisters();
963             stubInfo->patch.spillMode = NeedToSpill;
964
965             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
966             addSlowPathGenerator(WTF::move(slowPath));
967
968             base.use();
969
970             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
971             return;
972         }
973     }
974
975     JSValueOperand key(this, node->child1());
976     JSValueRegs regs = key.jsValueRegs();
977         
978     GPRFlushedCallResult result(this);
979     GPRReg resultGPR = result.gpr();
980         
981     base.use();
982     key.use();
983         
984     flushRegisters();
985     callOperation(
986         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
987         baseGPR, regs);
988     m_jit.exceptionCheck();
989     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
990 }
991
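// Annotation: non-speculative comparison. If the compare feeds directly into an immediately
// following Branch (detectPeepHoleBranch), the compare and branch are fused and the branch
// node is consumed here, so the caller must skip it; the function returns true in that case.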
992 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
993 {
994     unsigned branchIndexInBlock = detectPeepHoleBranch();
995     if (branchIndexInBlock != UINT_MAX) {
996         Node* branchNode = m_block->at(branchIndexInBlock);
997
998         ASSERT(node->adjustedRefCount() == 1);
999         
1000         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1001     
1002         m_indexInBlock = branchIndexInBlock;
1003         m_currentNode = branchNode;
1004         
1005         return true;
1006     }
1007     
1008     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1009     
1010     return false;
1011 }
1012
1013 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1014 {
1015     unsigned branchIndexInBlock = detectPeepHoleBranch();
1016     if (branchIndexInBlock != UINT_MAX) {
1017         Node* branchNode = m_block->at(branchIndexInBlock);
1018
1019         ASSERT(node->adjustedRefCount() == 1);
1020         
1021         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1022     
1023         m_indexInBlock = branchIndexInBlock;
1024         m_currentNode = branchNode;
1025         
1026         return true;
1027     }
1028     
1029     nonSpeculativeNonPeepholeStrictEq(node, invert);
1030     
1031     return false;
1032 }
1033
1034 static const char* dataFormatString(DataFormat format)
1035 {
1036     // These values correspond to the DataFormat enum.
1037     const char* strings[] = {
1038         "[  ]",
1039         "[ i]",
1040         "[ d]",
1041         "[ c]",
1042         "Err!",
1043         "Err!",
1044         "Err!",
1045         "Err!",
1046         "[J ]",
1047         "[Ji]",
1048         "[Jd]",
1049         "[Jc]",
1050         "Err!",
1051         "Err!",
1052         "Err!",
1053         "Err!",
1054     };
1055     return strings[format];
1056 }
1057
1058 void SpeculativeJIT::dump(const char* label)
1059 {
1060     if (label)
1061         dataLogF("<%s>\n", label);
1062
1063     dataLogF("  gprs:\n");
1064     m_gprs.dump();
1065     dataLogF("  fprs:\n");
1066     m_fprs.dump();
1067     dataLogF("  VirtualRegisters:\n");
1068     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1069         GenerationInfo& info = m_generationInfo[i];
1070         if (info.alive())
1071             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1072         else
1073             dataLogF("    % 3d:[__][__]", i);
1074         if (info.registerFormat() == DataFormatDouble)
1075             dataLogF(":fpr%d\n", info.fpr());
1076         else if (info.registerFormat() != DataFormatNone
1077 #if USE(JSVALUE32_64)
1078             && !(info.registerFormat() & DataFormatJS)
1079 #endif
1080             ) {
1081             ASSERT(info.gpr() != InvalidGPRReg);
1082             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1083         } else
1084             dataLogF("\n");
1085     }
1086     if (label)
1087         dataLogF("</%s>\n", label);
1088 }
1089
1090 GPRTemporary::GPRTemporary()
1091     : m_jit(0)
1092     , m_gpr(InvalidGPRReg)
1093 {
1094 }
1095
1096 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1097     : m_jit(jit)
1098     , m_gpr(InvalidGPRReg)
1099 {
1100     m_gpr = m_jit->allocate();
1101 }
1102
1103 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1104     : m_jit(jit)
1105     , m_gpr(InvalidGPRReg)
1106 {
1107     m_gpr = m_jit->allocate(specific);
1108 }
1109
1110 #if USE(JSVALUE32_64)
1111 GPRTemporary::GPRTemporary(
1112     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1113     : m_jit(jit)
1114     , m_gpr(InvalidGPRReg)
1115 {
1116     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1117         m_gpr = m_jit->reuse(op1.gpr(which));
1118     else
1119         m_gpr = m_jit->allocate();
1120 }
1121 #endif // USE(JSVALUE32_64)
1122
1123 JSValueRegsTemporary::JSValueRegsTemporary() { }
1124
1125 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1126 #if USE(JSVALUE64)
1127     : m_gpr(jit)
1128 #else
1129     : m_payloadGPR(jit)
1130     , m_tagGPR(jit)
1131 #endif
1132 {
1133 }
1134
1135 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1136
1137 JSValueRegs JSValueRegsTemporary::regs()
1138 {
1139 #if USE(JSVALUE64)
1140     return JSValueRegs(m_gpr.gpr());
1141 #else
1142     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1143 #endif
1144 }
1145
1146 void GPRTemporary::adopt(GPRTemporary& other)
1147 {
1148     ASSERT(!m_jit);
1149     ASSERT(m_gpr == InvalidGPRReg);
1150     ASSERT(other.m_jit);
1151     ASSERT(other.m_gpr != InvalidGPRReg);
1152     m_jit = other.m_jit;
1153     m_gpr = other.m_gpr;
1154     other.m_jit = 0;
1155     other.m_gpr = InvalidGPRReg;
1156 }
1157
1158 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1159     : m_jit(jit)
1160     , m_fpr(InvalidFPRReg)
1161 {
1162     m_fpr = m_jit->fprAllocate();
1163 }
1164
1165 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1166     : m_jit(jit)
1167     , m_fpr(InvalidFPRReg)
1168 {
1169     if (m_jit->canReuse(op1.node()))
1170         m_fpr = m_jit->reuse(op1.fpr());
1171     else
1172         m_fpr = m_jit->fprAllocate();
1173 }
1174
1175 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1176     : m_jit(jit)
1177     , m_fpr(InvalidFPRReg)
1178 {
1179     if (m_jit->canReuse(op1.node()))
1180         m_fpr = m_jit->reuse(op1.fpr());
1181     else if (m_jit->canReuse(op2.node()))
1182         m_fpr = m_jit->reuse(op2.fpr());
1183     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1184         m_fpr = m_jit->reuse(op1.fpr());
1185     else
1186         m_fpr = m_jit->fprAllocate();
1187 }
1188
1189 #if USE(JSVALUE32_64)
1190 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1191     : m_jit(jit)
1192     , m_fpr(InvalidFPRReg)
1193 {
1194     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1195         m_fpr = m_jit->reuse(op1.fpr());
1196     else
1197         m_fpr = m_jit->fprAllocate();
1198 }
1199 #endif
1200
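// Annotation: the compilePeepHole* helpers emit a fused compare-and-branch. They branch to
// the taken block and then jump to the notTaken block; when the taken block is the next block
// in emission order the condition is inverted so that the common case falls through.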
1201 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1202 {
1203     BasicBlock* taken = branchNode->branchData()->taken.block;
1204     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1205     
1206     SpeculateDoubleOperand op1(this, node->child1());
1207     SpeculateDoubleOperand op2(this, node->child2());
1208     
1209     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1210     jump(notTaken);
1211 }
1212
1213 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1214 {
1215     BasicBlock* taken = branchNode->branchData()->taken.block;
1216     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1217
1218     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1219     
1220     if (taken == nextBlock()) {
1221         condition = MacroAssembler::NotEqual;
1222         BasicBlock* tmp = taken;
1223         taken = notTaken;
1224         notTaken = tmp;
1225     }
1226
1227     SpeculateCellOperand op1(this, node->child1());
1228     SpeculateCellOperand op2(this, node->child2());
1229     
1230     GPRReg op1GPR = op1.gpr();
1231     GPRReg op2GPR = op2.gpr();
1232     
1233     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1234         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1235             speculationCheck(
1236                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1237         }
1238         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1239             speculationCheck(
1240                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1241         }
1242     } else {
1243         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1244             speculationCheck(
1245                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1246                 m_jit.branchIfNotObject(op1GPR));
1247         }
1248         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1249             m_jit.branchTest8(
1250                 MacroAssembler::NonZero, 
1251                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1252                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1253
1254         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1255             speculationCheck(
1256                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1257                 m_jit.branchIfNotObject(op2GPR));
1258         }
1259         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1260             m_jit.branchTest8(
1261                 MacroAssembler::NonZero, 
1262                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1263                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1264     }
1265
1266     branchPtr(condition, op1GPR, op2GPR, taken);
1267     jump(notTaken);
1268 }
1269
1270 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1271 {
1272     BasicBlock* taken = branchNode->branchData()->taken.block;
1273     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1274
1275     // The branch instruction will branch to the taken block.
1276     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1277     if (taken == nextBlock()) {
1278         condition = JITCompiler::invert(condition);
1279         BasicBlock* tmp = taken;
1280         taken = notTaken;
1281         notTaken = tmp;
1282     }
1283
1284     if (node->child1()->isBooleanConstant()) {
1285         bool imm = node->child1()->asBoolean();
1286         SpeculateBooleanOperand op2(this, node->child2());
1287         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1288     } else if (node->child2()->isBooleanConstant()) {
1289         SpeculateBooleanOperand op1(this, node->child1());
1290         bool imm = node->child2()->asBoolean();
1291         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1292     } else {
1293         SpeculateBooleanOperand op1(this, node->child1());
1294         SpeculateBooleanOperand op2(this, node->child2());
1295         branch32(condition, op1.gpr(), op2.gpr(), taken);
1296     }
1297
1298     jump(notTaken);
1299 }
1300
1301 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1302 {
1303     BasicBlock* taken = branchNode->branchData()->taken.block;
1304     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1305
1306     // The branch instruction will branch to the taken block.
1307     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1308     if (taken == nextBlock()) {
1309         condition = JITCompiler::invert(condition);
1310         BasicBlock* tmp = taken;
1311         taken = notTaken;
1312         notTaken = tmp;
1313     }
1314
1315     if (node->child1()->isInt32Constant()) {
1316         int32_t imm = node->child1()->asInt32();
1317         SpeculateInt32Operand op2(this, node->child2());
1318         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1319     } else if (node->child2()->isInt32Constant()) {
1320         SpeculateInt32Operand op1(this, node->child1());
1321         int32_t imm = node->child2()->asInt32();
1322         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1323     } else {
1324         SpeculateInt32Operand op1(this, node->child1());
1325         SpeculateInt32Operand op2(this, node->child2());
1326         branch32(condition, op1.gpr(), op2.gpr(), taken);
1327     }
1328
1329     jump(notTaken);
1330 }
1331
1332 // Returns true if the compare is fused with a subsequent branch.
1333 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1334 {
1335     // Fused compare & branch.
1336     unsigned branchIndexInBlock = detectPeepHoleBranch();
1337     if (branchIndexInBlock != UINT_MAX) {
1338         Node* branchNode = m_block->at(branchIndexInBlock);
1339
1340         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1341         // so there can be no intervening nodes that also reference the compare.
1342         ASSERT(node->adjustedRefCount() == 1);
1343
1344         if (node->isBinaryUseKind(Int32Use))
1345             compilePeepHoleInt32Branch(node, branchNode, condition);
1346 #if USE(JSVALUE64)
1347         else if (node->isBinaryUseKind(Int52RepUse))
1348             compilePeepHoleInt52Branch(node, branchNode, condition);
1349 #endif // USE(JSVALUE64)
1350         else if (node->isBinaryUseKind(DoubleRepUse))
1351             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1352         else if (node->op() == CompareEq) {
1353             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1354                 // Use non-peephole comparison, for now.
1355                 return false;
1356             }
1357             if (node->isBinaryUseKind(BooleanUse))
1358                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1359             else if (node->isBinaryUseKind(ObjectUse))
1360                 compilePeepHoleObjectEquality(node, branchNode);
1361             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1362                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1363             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1364                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1365             else if (!needsTypeCheck(node->child1(), SpecOther))
1366                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1367             else if (!needsTypeCheck(node->child2(), SpecOther))
1368                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1369             else {
1370                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1371                 return true;
1372             }
1373         } else {
1374             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1375             return true;
1376         }
1377
1378         use(node->child1());
1379         use(node->child2());
1380         m_indexInBlock = branchIndexInBlock;
1381         m_currentNode = branchNode;
1382         return true;
1383     }
1384     return false;
1385 }
1386
1387 void SpeculativeJIT::noticeOSRBirth(Node* node)
1388 {
1389     if (!node->hasVirtualRegister())
1390         return;
1391     
1392     VirtualRegister virtualRegister = node->virtualRegister();
1393     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1394     
1395     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1396 }
1397
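// Annotation: MovHint does not generate code. It records in the variable event stream that
// the given bytecode local now logically holds the child node's value, so that OSR exit can
// reconstruct the bytecode state; noticeOSRBirth ensures the child's GenerationInfo starts
// reporting events from this point on.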
1398 void SpeculativeJIT::compileMovHint(Node* node)
1399 {
1400     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1401     
1402     Node* child = node->child1().node();
1403     noticeOSRBirth(child);
1404     
1405     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1406 }
1407
1408 void SpeculativeJIT::bail(AbortReason reason)
1409 {
1410     if (verboseCompilationEnabled())
1411         dataLog("Bailing compilation.\n");
1412     m_compileOkay = true;
1413     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1414     clearGenerationInfo();
1415 }
1416
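// Annotation: per-block code generation. We label the block head, refuse to generate code
// for blocks the CFA proved unreachable, record the incoming flush format of each live
// variable for OSR, and then generate each node in turn while running the abstract
// interpreter in lock-step so that type information stays in sync with the emitted code. If
// the abstract state becomes contradictory, or a node fails to compile, we bail and plant an
// abort.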
1417 void SpeculativeJIT::compileCurrentBlock()
1418 {
1419     ASSERT(m_compileOkay);
1420     
1421     if (!m_block)
1422         return;
1423     
1424     ASSERT(m_block->isReachable);
1425     
1426     m_jit.blockHeads()[m_block->index] = m_jit.label();
1427
1428     if (!m_block->intersectionOfCFAHasVisited) {
1429         // Don't generate code for basic blocks that are unreachable according to CFA.
1430         // But to be sure that nobody has generated a jump to this block, drop in a
1431         // breakpoint here.
1432         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1433         return;
1434     }
1435
1436     m_stream->appendAndLog(VariableEvent::reset());
1437     
1438     m_jit.jitAssertHasValidCallFrame();
1439     m_jit.jitAssertTagsInPlace();
1440     m_jit.jitAssertArgumentCountSane();
1441
1442     m_state.reset();
1443     m_state.beginBasicBlock(m_block);
1444     
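    // Record how each variable that is live at the head of the block is flushed to the stack, so
    // that the variable event stream can rebuild the bytecode state if we OSR exit in this block.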
1445     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1446         int operand = m_block->variablesAtHead.operandForIndex(i);
1447         Node* node = m_block->variablesAtHead[i];
1448         if (!node)
1449             continue; // No need to record dead SetLocals.
1450         
1451         VariableAccessData* variable = node->variableAccessData();
1452         DataFormat format;
1453         if (!node->refCount())
1454             continue; // Likewise, skip SetLocals for nodes that are no longer referenced.
1455         format = dataFormatFor(variable->flushFormat());
1456         m_stream->appendAndLog(
1457             VariableEvent::setLocal(
1458                 VirtualRegister(operand),
1459                 variable->machineLocal(),
1460                 format));
1461     }
1462
1463     m_origin = NodeOrigin();
1464     
1465     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1466         m_currentNode = m_block->at(m_indexInBlock);
1467         
1468         // We may have hit a contradiction that the CFA was aware of but that the JIT
1469         // didn't cause directly.
1470         if (!m_state.isValid()) {
1471             bail(DFGBailedAtTopOfBlock);
1472             return;
1473         }
1474
1475         m_interpreter.startExecuting();
1476         m_jit.setForNode(m_currentNode);
1477         m_origin = m_currentNode->origin;
1478         if (validationEnabled())
1479             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1480         m_lastGeneratedNode = m_currentNode->op();
1481         
1482         ASSERT(m_currentNode->shouldGenerate());
1483         
1484         if (verboseCompilationEnabled()) {
1485             dataLogF(
1486                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1487                 (int)m_currentNode->index(),
1488                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1489             dataLog("\n");
1490         }
1491         
1492         m_jit.jitAssertNoException();
1493
1494         compile(m_currentNode);
1495         
1496         if (belongsInMinifiedGraph(m_currentNode->op()))
1497             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1498         
1499 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1500         m_jit.clearRegisterAllocationOffsets();
1501 #endif
1502         
1503         if (!m_compileOkay) {
1504             bail(DFGBailedAtEndOfNode);
1505             return;
1506         }
1507         
1508         // Make sure that the abstract state is rematerialized for the next node.
1509         m_interpreter.executeEffects(m_indexInBlock);
1510     }
1511     
1512     // Perform the most basic verification that children have been used correctly.
1513     if (!ASSERT_DISABLED) {
1514         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1515             GenerationInfo& info = m_generationInfo[index];
1516             RELEASE_ASSERT(!info.alive());
1517         }
1518     }
1519 }
1520
1521 // If we are making type predictions about our arguments, then we need
1522 // to check that those predictions are correct on function entry.
1523 void SpeculativeJIT::checkArgumentTypes()
1524 {
1525     ASSERT(!m_currentNode);
1526     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1527
1528     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1529         Node* node = m_jit.graph().m_arguments[i];
1530         if (!node) {
1531             // The argument is dead. We don't do any checks for such arguments.
1532             continue;
1533         }
1534         
1535         ASSERT(node->op() == SetArgument);
1536         ASSERT(node->shouldGenerate());
1537
1538         VariableAccessData* variableAccessData = node->variableAccessData();
1539         FlushFormat format = variableAccessData->flushFormat();
1540         
1541         if (format == FlushedJSValue)
1542             continue;
1543         
1544         VirtualRegister virtualRegister = variableAccessData->local();
1545
1546         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1547         
1548 #if USE(JSVALUE64)
1549         switch (format) {
1550         case FlushedInt32: {
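            // On 64-bit, boxed int32s are exactly the values that compare unsigned-greater-than-or-
            // equal to the tagTypeNumber mask, so anything below it is not an int32.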
1551             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1552             break;
1553         }
1554         case FlushedBoolean: {
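            // Booleans are encoded as ValueFalse and ValueFalse | 1, so after xoring with ValueFalse
            // a genuine boolean leaves at most bit 0 set; any higher bit means a non-boolean.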
1555             GPRTemporary temp(this);
1556             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1557             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1558             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1559             break;
1560         }
1561         case FlushedCell: {
1562             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1563             break;
1564         }
1565         default:
1566             RELEASE_ASSERT_NOT_REACHED();
1567             break;
1568         }
1569 #else
1570         switch (format) {
1571         case FlushedInt32: {
1572             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1573             break;
1574         }
1575         case FlushedBoolean: {
1576             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1577             break;
1578         }
1579         case FlushedCell: {
1580             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1581             break;
1582         }
1583         default:
1584             RELEASE_ASSERT_NOT_REACHED();
1585             break;
1586         }
1587 #endif
1588     }
1589
1590     m_origin = NodeOrigin();
1591 }
1592
1593 bool SpeculativeJIT::compile()
1594 {
1595     checkArgumentTypes();
1596     
1597     ASSERT(!m_currentNode);
1598     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1599         m_jit.setForBlockIndex(blockIndex);
1600         m_block = m_jit.graph().block(blockIndex);
1601         compileCurrentBlock();
1602     }
1603     linkBranches();
1604     return true;
1605 }
1606
1607 void SpeculativeJIT::createOSREntries()
1608 {
1609     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1610         BasicBlock* block = m_jit.graph().block(blockIndex);
1611         if (!block)
1612             continue;
1613         if (!block->isOSRTarget)
1614             continue;
1615         
1616         // Currently we don't have OSR entry trampolines. We could add them
1617         // here if need be.
1618         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1619     }
1620 }
1621
1622 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1623 {
1624     unsigned osrEntryIndex = 0;
1625     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1626         BasicBlock* block = m_jit.graph().block(blockIndex);
1627         if (!block)
1628             continue;
1629         if (!block->isOSRTarget)
1630             continue;
1631         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1632     }
1633     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1634     
1635     if (verboseCompilationEnabled()) {
1636         DumpContext dumpContext;
1637         dataLog("OSR Entries:\n");
1638         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1639             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1640         if (!dumpContext.isEmpty())
1641             dumpContext.dump(WTF::dataFile());
1642     }
1643 }
1644
1645 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1646 {
1647     Edge child3 = m_jit.graph().varArgChild(node, 2);
1648     Edge child4 = m_jit.graph().varArgChild(node, 3);
1649
1650     ArrayMode arrayMode = node->arrayMode();
1651     
1652     GPRReg baseReg = base.gpr();
1653     GPRReg propertyReg = property.gpr();
1654     
1655     SpeculateDoubleOperand value(this, child3);
1656
1657     FPRReg valueReg = value.fpr();
1658     
1659     DFG_TYPE_CHECK(
1660         JSValueRegs(), child3, SpecFullRealNumber,
1661         m_jit.branchDouble(
1662             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1663     
1664     if (!m_compileOkay)
1665         return;
1666     
1667     StorageOperand storage(this, child4);
1668     GPRReg storageReg = storage.gpr();
1669
1670     if (node->op() == PutByValAlias) {
1671         // Store the value to the array.
1672         GPRReg propertyReg = property.gpr();
1673         FPRReg valueReg = value.fpr();
1674         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1675         
1676         noResult(m_currentNode);
1677         return;
1678     }
1679     
1680     GPRTemporary temporary;
1681     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1682
1683     MacroAssembler::Jump slowCase;
1684     
1685     if (arrayMode.isInBounds()) {
1686         speculationCheck(
1687             OutOfBounds, JSValueRegs(), 0,
1688             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1689     } else {
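        // The store may write past the public length. An index below the vector length grows the
        // array by bumping the public length to index + 1; an index at or past the vector length
        // either fails speculation (when the array mode does not expect out-of-bounds stores) or
        // takes the slow path call added below.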
1690         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1691         
1692         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1693         
1694         if (!arrayMode.isOutOfBounds())
1695             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1696         
1697         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1698         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1699         
1700         inBounds.link(&m_jit);
1701     }
1702     
1703     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1704
1705     base.use();
1706     property.use();
1707     value.use();
1708     storage.use();
1709     
1710     if (arrayMode.isOutOfBounds()) {
1711         addSlowPathGenerator(
1712             slowPathCall(
1713                 slowCase, this,
1714                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1715                 NoResult, baseReg, propertyReg, valueReg));
1716     }
1717
1718     noResult(m_currentNode, UseChildrenCalledExplicitly);
1719 }
1720
1721 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1722 {
1723     SpeculateCellOperand string(this, node->child1());
1724     SpeculateStrictInt32Operand index(this, node->child2());
1725     StorageOperand storage(this, node->child3());
1726
1727     GPRReg stringReg = string.gpr();
1728     GPRReg indexReg = index.gpr();
1729     GPRReg storageReg = storage.gpr();
1730     
1731     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1732
1733     // Unsigned comparison so we can filter out both negative indices and indices that are too large.
1734     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1735
1736     GPRTemporary scratch(this);
1737     GPRReg scratchReg = scratch.gpr();
1738
1739     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1740
1741     // Load the character into scratchReg
1742     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1743
1744     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1745     JITCompiler::Jump cont8Bit = m_jit.jump();
1746
1747     is16Bit.link(&m_jit);
1748
1749     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1750
1751     cont8Bit.link(&m_jit);
1752
1753     int32Result(scratchReg, m_currentNode);
1754 }
1755
1756 void SpeculativeJIT::compileGetByValOnString(Node* node)
1757 {
1758     SpeculateCellOperand base(this, node->child1());
1759     SpeculateStrictInt32Operand property(this, node->child2());
1760     StorageOperand storage(this, node->child3());
1761     GPRReg baseReg = base.gpr();
1762     GPRReg propertyReg = property.gpr();
1763     GPRReg storageReg = storage.gpr();
1764
1765     GPRTemporary scratch(this);
1766     GPRReg scratchReg = scratch.gpr();
1767 #if USE(JSVALUE32_64)
1768     GPRTemporary resultTag;
1769     GPRReg resultTagReg = InvalidGPRReg;
1770     if (node->arrayMode().isOutOfBounds()) {
1771         GPRTemporary realResultTag(this);
1772         resultTag.adopt(realResultTag);
1773         resultTagReg = resultTag.gpr();
1774     }
1775 #endif
1776
1777     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1778
1779     // Unsigned comparison so we can filter out both negative indices and indices that are too large.
1780     JITCompiler::Jump outOfBounds = m_jit.branch32(
1781         MacroAssembler::AboveOrEqual, propertyReg,
1782         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1783     if (node->arrayMode().isInBounds())
1784         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1785
1786     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1787
1788     // Load the character into scratchReg
1789     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1790
1791     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1792     JITCompiler::Jump cont8Bit = m_jit.jump();
1793
1794     is16Bit.link(&m_jit);
1795
1796     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1797
1798     JITCompiler::Jump bigCharacter =
1799         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1800
1801     // 8-bit string values don't need the isASCII check.
1802     cont8Bit.link(&m_jit);
1803
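    // Character codes below 0x100 map to the VM's precomputed single-character strings: scale the
    // code by the pointer size to index smallStrings.singleCharacterStrings() and load the JSString*.
    // Larger codes branched away at bigCharacter and are handled by operationSingleCharacterString.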
1804     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1805     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1806     m_jit.loadPtr(scratchReg, scratchReg);
1807
1808     addSlowPathGenerator(
1809         slowPathCall(
1810             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1811
1812     if (node->arrayMode().isOutOfBounds()) {
1813 #if USE(JSVALUE32_64)
1814         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1815 #endif
1816
1817         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1818         if (globalObject->stringPrototypeChainIsSane()) {
1819             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1820             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1821             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1822             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1823             // indexed properties either.
1824             // https://bugs.webkit.org/show_bug.cgi?id=144668
1825             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1826             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
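            // Registering these watchpoints lazily means the compiled code gets jettisoned if either
            // prototype's structure ever transitions, which would invalidate the sane-chain assumption.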
1827             
1828 #if USE(JSVALUE64)
1829             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1830                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1831 #else
1832             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1833                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1834                 baseReg, propertyReg));
1835 #endif
1836         } else {
1837 #if USE(JSVALUE64)
1838             addSlowPathGenerator(
1839                 slowPathCall(
1840                     outOfBounds, this, operationGetByValStringInt,
1841                     scratchReg, baseReg, propertyReg));
1842 #else
1843             addSlowPathGenerator(
1844                 slowPathCall(
1845                     outOfBounds, this, operationGetByValStringInt,
1846                     resultTagReg, scratchReg, baseReg, propertyReg));
1847 #endif
1848         }
1849         
1850 #if USE(JSVALUE64)
1851         jsValueResult(scratchReg, m_currentNode);
1852 #else
1853         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1854 #endif
1855     } else
1856         cellResult(scratchReg, m_currentNode);
1857 }
1858
1859 void SpeculativeJIT::compileFromCharCode(Node* node)
1860 {
1861     SpeculateStrictInt32Operand property(this, node->child1());
1862     GPRReg propertyReg = property.gpr();
1863     GPRTemporary smallStrings(this);
1864     GPRTemporary scratch(this);
1865     GPRReg scratchReg = scratch.gpr();
1866     GPRReg smallStringsReg = smallStrings.gpr();
1867
1868     JITCompiler::JumpList slowCases;
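    // Fast path only for character codes that index the single-character string table and whose
    // table entry has already been materialized; everything else calls operationStringFromCharCode.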
1869     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1870     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1871     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1872
1873     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1874     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1875     cellResult(scratchReg, m_currentNode);
1876 }
1877
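// Classify how the input to ValueToInt32 is currently represented so that compileValueToInt32 can
// pick the cheapest conversion: already an int32, a JSValue that still needs dynamic dispatch, or a
// format (boolean/cell) on which we simply terminate speculative execution.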
1878 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1879 {
1880     VirtualRegister virtualRegister = node->virtualRegister();
1881     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1882
1883     switch (info.registerFormat()) {
1884     case DataFormatStorage:
1885         RELEASE_ASSERT_NOT_REACHED();
1886
1887     case DataFormatBoolean:
1888     case DataFormatCell:
1889         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1890         return GeneratedOperandTypeUnknown;
1891
1892     case DataFormatNone:
1893     case DataFormatJSCell:
1894     case DataFormatJS:
1895     case DataFormatJSBoolean:
1896     case DataFormatJSDouble:
1897         return GeneratedOperandJSValue;
1898
1899     case DataFormatJSInt32:
1900     case DataFormatInt32:
1901         return GeneratedOperandInteger;
1902
1903     default:
1904         RELEASE_ASSERT_NOT_REACHED();
1905         return GeneratedOperandTypeUnknown;
1906     }
1907 }
1908
1909 void SpeculativeJIT::compileValueToInt32(Node* node)
1910 {
1911     switch (node->child1().useKind()) {
1912 #if USE(JSVALUE64)
1913     case Int52RepUse: {
1914         SpeculateStrictInt52Operand op1(this, node->child1());
1915         GPRTemporary result(this, Reuse, op1);
1916         GPRReg op1GPR = op1.gpr();
1917         GPRReg resultGPR = result.gpr();
1918         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1919         int32Result(resultGPR, node, DataFormatInt32);
1920         return;
1921     }
1922 #endif // USE(JSVALUE64)
1923         
1924     case DoubleRepUse: {
1925         GPRTemporary result(this);
1926         SpeculateDoubleOperand op1(this, node->child1());
1927         FPRReg fpr = op1.fpr();
1928         GPRReg gpr = result.gpr();
1929         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1930         
1931         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1932         
1933         int32Result(gpr, node);
1934         return;
1935     }
1936     
1937     case NumberUse:
1938     case NotCellUse: {
1939         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1940         case GeneratedOperandInteger: {
1941             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1942             GPRTemporary result(this, Reuse, op1);
1943             m_jit.move(op1.gpr(), result.gpr());
1944             int32Result(result.gpr(), node, op1.format());
1945             return;
1946         }
1947         case GeneratedOperandJSValue: {
1948             GPRTemporary result(this);
1949 #if USE(JSVALUE64)
1950             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1951
1952             GPRReg gpr = op1.gpr();
1953             GPRReg resultGpr = result.gpr();
1954             FPRTemporary tempFpr(this);
1955             FPRReg fpr = tempFpr.fpr();
1956
1957             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1958             JITCompiler::JumpList converted;
1959
1960             if (node->child1().useKind() == NumberUse) {
1961                 DFG_TYPE_CHECK(
1962                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1963                     m_jit.branchTest64(
1964                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1965             } else {
1966                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1967                 
1968                 DFG_TYPE_CHECK(
1969                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1970                 
1971                 // It's not a cell, so true turns into 1 and everything else turns into 0.
1972                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1973                 converted.append(m_jit.jump());
1974                 
1975                 isNumber.link(&m_jit);
1976             }
1977
1978             // If we get here, we have a double encoded as a JSValue: unbox it and convert it via toInt32.
1979             m_jit.move(gpr, resultGpr);
1980             unboxDouble(resultGpr, fpr);
1981
1982             silentSpillAllRegisters(resultGpr);
1983             callOperation(toInt32, resultGpr, fpr);
1984             silentFillAllRegisters(resultGpr);
1985             m_jit.exceptionCheck();
1986
1987             converted.append(m_jit.jump());
1988
1989             isInteger.link(&m_jit);
1990             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1991
1992             converted.link(&m_jit);
1993 #else
1994             Node* childNode = node->child1().node();
1995             VirtualRegister virtualRegister = childNode->virtualRegister();
1996             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1997
1998             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1999
2000             GPRReg payloadGPR = op1.payloadGPR();
2001             GPRReg resultGpr = result.gpr();
2002         
2003             JITCompiler::JumpList converted;
2004
2005             if (info.registerFormat() == DataFormatJSInt32)
2006                 m_jit.move(payloadGPR, resultGpr);
2007             else {
2008                 GPRReg tagGPR = op1.tagGPR();
2009                 FPRTemporary tempFpr(this);
2010                 FPRReg fpr = tempFpr.fpr();
2011                 FPRTemporary scratch(this);
2012
2013                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2014
2015                 if (node->child1().useKind() == NumberUse) {
2016                     DFG_TYPE_CHECK(
2017                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2018                         m_jit.branch32(
2019                             MacroAssembler::AboveOrEqual, tagGPR,
2020                             TrustedImm32(JSValue::LowestTag)));
2021                 } else {
2022                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2023                     
2024                     DFG_TYPE_CHECK(
2025                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2026                         m_jit.branchIfCell(op1.jsValueRegs()));
2027                     
2028                     // It's not a cell, so true turns into 1 and everything else turns into 0.
2029                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2030                     m_jit.move(TrustedImm32(0), resultGpr);
2031                     converted.append(m_jit.jump());
2032                     
2033                     isBoolean.link(&m_jit);
2034                     m_jit.move(payloadGPR, resultGpr);
2035                     converted.append(m_jit.jump());
2036                     
2037                     isNumber.link(&m_jit);
2038                 }
2039
2040                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2041
2042                 silentSpillAllRegisters(resultGpr);
2043                 callOperation(toInt32, resultGpr, fpr);
2044                 silentFillAllRegisters(resultGpr);
2045                 m_jit.exceptionCheck();
2046
2047                 converted.append(m_jit.jump());
2048
2049                 isInteger.link(&m_jit);
2050                 m_jit.move(payloadGPR, resultGpr);
2051
2052                 converted.link(&m_jit);
2053             }
2054 #endif
2055             int32Result(resultGpr, node);
2056             return;
2057         }
2058         case GeneratedOperandTypeUnknown:
2059             RELEASE_ASSERT(!m_compileOkay);
2060             return;
2061         }
2062         RELEASE_ASSERT_NOT_REACHED();
2063         return;
2064     }
2065     
2066     default:
2067         ASSERT(!m_compileOkay);
2068         return;
2069     }
2070 }
2071
2072 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2073 {
2074     if (doesOverflow(node->arithMode())) {
2075         // We know that this sometimes produces doubles, so produce a double every
2076         // time. This at least spares subsequent code from weird conditionals.
2077             
2078         SpeculateInt32Operand op1(this, node->child1());
2079         FPRTemporary result(this);
2080             
2081         GPRReg inputGPR = op1.gpr();
2082         FPRReg outputFPR = result.fpr();
2083             
2084         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2085             
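        // convertInt32ToDouble treated the operand as signed, so if the uint32 had its top bit set
        // the result is off by exactly 2^32; add it back.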
2086         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2087         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2088         positive.link(&m_jit);
2089             
2090         doubleResult(outputFPR, node);
2091         return;
2092     }
2093     
2094     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2095
2096     SpeculateInt32Operand op1(this, node->child1());
2097     GPRTemporary result(this);
2098
2099     m_jit.move(op1.gpr(), result.gpr());
2100
2101     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2102
2103     int32Result(result.gpr(), node, op1.format());
2104 }
2105
2106 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2107 {
2108     SpeculateDoubleOperand op1(this, node->child1());
2109     FPRTemporary scratch(this);
2110     GPRTemporary result(this);
2111     
2112     FPRReg valueFPR = op1.fpr();
2113     FPRReg scratchFPR = scratch.fpr();
2114     GPRReg resultGPR = result.gpr();
2115
2116     JITCompiler::JumpList failureCases;
2117     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2118     m_jit.branchConvertDoubleToInt32(
2119         valueFPR, resultGPR, failureCases, scratchFPR,
2120         shouldCheckNegativeZero(node->arithMode()));
2121     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2122
2123     int32Result(resultGPR, node);
2124 }
2125
2126 void SpeculativeJIT::compileDoubleRep(Node* node)
2127 {
2128     switch (node->child1().useKind()) {
2129     case RealNumberUse: {
2130         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2131         FPRTemporary result(this);
2132         
2133         JSValueRegs op1Regs = op1.jsValueRegs();
2134         FPRReg resultFPR = result.fpr();
2135         
2136 #if USE(JSVALUE64)
2137         GPRTemporary temp(this);
2138         GPRReg tempGPR = temp.gpr();
2139         m_jit.move(op1Regs.gpr(), tempGPR);
2140         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2141 #else
2142         FPRTemporary temp(this);
2143         FPRReg tempFPR = temp.fpr();
2144         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2145 #endif
2146         
2147         JITCompiler::Jump done = m_jit.branchDouble(
2148             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2149         
2150         DFG_TYPE_CHECK(
2151             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2152         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2153         
2154         done.link(&m_jit);
2155         
2156         doubleResult(resultFPR, node);
2157         return;
2158     }
2159     
2160     case NotCellUse:
2161     case NumberUse: {
2162         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2163
2164         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2165         if (isInt32Speculation(possibleTypes)) {
2166             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2167             FPRTemporary result(this);
2168             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2169             doubleResult(result.fpr(), node);
2170             return;
2171         }
2172
2173         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2174         FPRTemporary result(this);
2175
2176 #if USE(JSVALUE64)
2177         GPRTemporary temp(this);
2178
2179         GPRReg op1GPR = op1.gpr();
2180         GPRReg tempGPR = temp.gpr();
2181         FPRReg resultFPR = result.fpr();
2182         JITCompiler::JumpList done;
2183
2184         JITCompiler::Jump isInteger = m_jit.branch64(
2185             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2186
2187         if (node->child1().useKind() == NotCellUse) {
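        // Inline ToNumber for the non-number, non-cell cases: undefined becomes NaN, null and false
        // become 0, and true becomes 1. Anything else, in particular any cell, fails the type check.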
2188             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2189             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2190
2191             static const double zero = 0;
2192             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2193
2194             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2195             done.append(isNull);
2196
2197             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2198                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2199
2200             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2201             static const double one = 1;
2202             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2203             done.append(isFalse);
2204
2205             isUndefined.link(&m_jit);
2206             static const double NaN = PNaN;
2207             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2208             done.append(m_jit.jump());
2209
2210             isNumber.link(&m_jit);
2211         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2212             typeCheck(
2213                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2214                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2215         }
2216     
2217         m_jit.move(op1GPR, tempGPR);
2218         unboxDouble(tempGPR, resultFPR);
2219         done.append(m_jit.jump());
2220     
2221         isInteger.link(&m_jit);
2222         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2223         done.link(&m_jit);
2224 #else // USE(JSVALUE64) -> this is the 32_64 case
2225         FPRTemporary temp(this);
2226     
2227         GPRReg op1TagGPR = op1.tagGPR();
2228         GPRReg op1PayloadGPR = op1.payloadGPR();
2229         FPRReg tempFPR = temp.fpr();
2230         FPRReg resultFPR = result.fpr();
2231         JITCompiler::JumpList done;
2232     
2233         JITCompiler::Jump isInteger = m_jit.branch32(
2234             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2235
2236         if (node->child1().useKind() == NotCellUse) {
2237             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2238             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2239
2240             static const double zero = 0;
2241             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2242
2243             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2244             done.append(isNull);
2245
2246             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2247
2248             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2249             static const double one = 1;
2250             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2251             done.append(isFalse);
2252
2253             isUndefined.link(&m_jit);
2254             static const double NaN = PNaN;
2255             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2256             done.append(m_jit.jump());
2257
2258             isNumber.link(&m_jit);
2259         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2260             typeCheck(
2261                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2262                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2263         }
2264
2265         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2266         done.append(m_jit.jump());
2267     
2268         isInteger.link(&m_jit);
2269         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2270         done.link(&m_jit);
2271 #endif // USE(JSVALUE64)
2272     
2273         doubleResult(resultFPR, node);
2274         return;
2275     }
2276         
2277 #if USE(JSVALUE64)
2278     case Int52RepUse: {
2279         SpeculateStrictInt52Operand value(this, node->child1());
2280         FPRTemporary result(this);
2281         
2282         GPRReg valueGPR = value.gpr();
2283         FPRReg resultFPR = result.fpr();
2284
2285         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2286         
2287         doubleResult(resultFPR, node);
2288         return;
2289     }
2290 #endif // USE(JSVALUE64)
2291         
2292     default:
2293         RELEASE_ASSERT_NOT_REACHED();
2294         return;
2295     }
2296 }
2297
2298 void SpeculativeJIT::compileValueRep(Node* node)
2299 {
2300     switch (node->child1().useKind()) {
2301     case DoubleRepUse: {
2302         SpeculateDoubleOperand value(this, node->child1());
2303         JSValueRegsTemporary result(this);
2304         
2305         FPRReg valueFPR = value.fpr();
2306         JSValueRegs resultRegs = result.regs();
2307         
2308         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2309         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2310         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2311         // local was purified.
2312         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2313             m_jit.purifyNaN(valueFPR);
2314
2315         boxDouble(valueFPR, resultRegs);
2316         
2317         jsValueResult(resultRegs, node);
2318         return;
2319     }
2320         
2321 #if USE(JSVALUE64)
2322     case Int52RepUse: {
2323         SpeculateStrictInt52Operand value(this, node->child1());
2324         GPRTemporary result(this);
2325         
2326         GPRReg valueGPR = value.gpr();
2327         GPRReg resultGPR = result.gpr();
2328         
2329         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2330         
2331         jsValueResult(resultGPR, node);
2332         return;
2333     }
2334 #endif // USE(JSVALUE64)
2335         
2336     default:
2337         RELEASE_ASSERT_NOT_REACHED();
2338         return;
2339     }
2340 }
2341
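// Clamp a double to the 0..255 byte range for clamped typed array stores: NaN and anything not
// greater than 0 become 0, values above 255 become 255, and adding 0.5 before the caller truncates
// rounds to the nearest integer (halves rounding up).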
2342 static double clampDoubleToByte(double d)
2343 {
2344     d += 0.5;
2345     if (!(d > 0))
2346         d = 0;
2347     else if (d > 255)
2348         d = 255;
2349     return d;
2350 }
2351
2352 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2353 {
2354     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2355     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2356     jit.xorPtr(result, result);
2357     MacroAssembler::Jump clamped = jit.jump();
2358     tooBig.link(&jit);
2359     jit.move(JITCompiler::TrustedImm32(255), result);
2360     clamped.link(&jit);
2361     inBounds.link(&jit);
2362 }
2363
2364 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2365 {
2366     // Unordered compare so we pick up NaN
2367     static const double zero = 0;
2368     static const double byteMax = 255;
2369     static const double half = 0.5;
2370     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2371     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2372     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2373     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2374     
2375     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2376     // FIXME: This should probably just use a floating point round!
2377     // https://bugs.webkit.org/show_bug.cgi?id=72054
2378     jit.addDouble(source, scratch);
2379     jit.truncateDoubleToInt32(scratch, result);   
2380     MacroAssembler::Jump truncatedInt = jit.jump();
2381     
2382     tooSmall.link(&jit);
2383     jit.xorPtr(result, result);
2384     MacroAssembler::Jump zeroed = jit.jump();
2385     
2386     tooBig.link(&jit);
2387     jit.move(JITCompiler::TrustedImm32(255), result);
2388     
2389     truncatedInt.link(&jit);
2390     zeroed.link(&jit);
2391
2392 }
2393
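// Returns an unset Jump when the bounds check can be skipped entirely: PutByValAlias has already
// been bounds-checked, and a constant index into a view of statically known length needs no
// dynamic check.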
2394 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2395 {
2396     if (node->op() == PutByValAlias)
2397         return JITCompiler::Jump();
2398     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2399         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2400     if (view) {
2401         uint32_t length = view->length();
2402         Node* indexNode = m_jit.graph().child(node, 1).node();
2403         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2404             return JITCompiler::Jump();
2405         return m_jit.branch32(
2406             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2407     }
2408     return m_jit.branch32(
2409         MacroAssembler::AboveOrEqual, indexGPR,
2410         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2411 }
2412
2413 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2414 {
2415     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2416     if (!jump.isSet())
2417         return;
2418     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2419 }
2420
2421 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2422 {
2423     ASSERT(isInt(type));
2424     
2425     SpeculateCellOperand base(this, node->child1());
2426     SpeculateStrictInt32Operand property(this, node->child2());
2427     StorageOperand storage(this, node->child3());
2428
2429     GPRReg baseReg = base.gpr();
2430     GPRReg propertyReg = property.gpr();
2431     GPRReg storageReg = storage.gpr();
2432
2433     GPRTemporary result(this);
2434     GPRReg resultReg = result.gpr();
2435
2436     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2437
2438     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2439     switch (elementSize(type)) {
2440     case 1:
2441         if (isSigned(type))
2442             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2443         else
2444             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2445         break;
2446     case 2:
2447         if (isSigned(type))
2448             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2449         else
2450             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2451         break;
2452     case 4:
2453         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2454         break;
2455     default:
2456         CRASH();
2457     }
2458     if (elementSize(type) < 4 || isSigned(type)) {
2459         int32Result(resultReg, node);
2460         return;
2461     }
2462     
2463     ASSERT(elementSize(type) == 4 && !isSigned(type));
2464     if (node->shouldSpeculateInt32()) {
2465         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2466         int32Result(resultReg, node);
2467         return;
2468     }
2469     
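    // A uint32 that does not fit in an int32 can still be returned exactly: as an Int52 on 64-bit
    // targets when the node can use machine ints, or as a double (with the 2^32 correction below).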
2470 #if USE(JSVALUE64)
2471     if (node->shouldSpeculateMachineInt()) {
2472         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2473         strictInt52Result(resultReg, node);
2474         return;
2475     }
2476 #endif
2477     
2478     FPRTemporary fresult(this);
2479     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2480     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2481     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2482     positive.link(&m_jit);
2483     doubleResult(fresult.fpr(), node);
2484 }
2485
2486 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2487 {
2488     ASSERT(isInt(type));
2489     
2490     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2491     GPRReg storageReg = storage.gpr();
2492     
2493     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2494     
2495     GPRTemporary value;
2496     GPRReg valueGPR = InvalidGPRReg;
2497     
2498     if (valueUse->isConstant()) {
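        // Constant values are folded at compile time: a non-number constant makes this node
        // unconditionally fail speculation, while numeric constants are clamped or truncated now and
        // simply materialized into a scratch register.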
2499         JSValue jsValue = valueUse->asJSValue();
2500         if (!jsValue.isNumber()) {
2501             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2502             noResult(node);
2503             return;
2504         }
2505         double d = jsValue.asNumber();
2506         if (isClamped(type)) {
2507             ASSERT(elementSize(type) == 1);
2508             d = clampDoubleToByte(d);
2509         }
2510         GPRTemporary scratch(this);
2511         GPRReg scratchReg = scratch.gpr();
2512         m_jit.move(Imm32(toInt32(d)), scratchReg);
2513         value.adopt(scratch);
2514         valueGPR = scratchReg;
2515     } else {
2516         switch (valueUse.useKind()) {
2517         case Int32Use: {
2518             SpeculateInt32Operand valueOp(this, valueUse);
2519             GPRTemporary scratch(this);
2520             GPRReg scratchReg = scratch.gpr();
2521             m_jit.move(valueOp.gpr(), scratchReg);
2522             if (isClamped(type)) {
2523                 ASSERT(elementSize(type) == 1);
2524                 compileClampIntegerToByte(m_jit, scratchReg);
2525             }
2526             value.adopt(scratch);
2527             valueGPR = scratchReg;
2528             break;
2529         }
2530             
2531 #if USE(JSVALUE64)
2532         case Int52RepUse: {
2533             SpeculateStrictInt52Operand valueOp(this, valueUse);
2534             GPRTemporary scratch(this);
2535             GPRReg scratchReg = scratch.gpr();
2536             m_jit.move(valueOp.gpr(), scratchReg);
2537             if (isClamped(type)) {
2538                 ASSERT(elementSize(type) == 1);
2539                 MacroAssembler::Jump inBounds = m_jit.branch64(
2540                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2541                 MacroAssembler::Jump tooBig = m_jit.branch64(
2542                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2543                 m_jit.move(TrustedImm32(0), scratchReg);
2544                 MacroAssembler::Jump clamped = m_jit.jump();
2545                 tooBig.link(&m_jit);
2546                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2547                 clamped.link(&m_jit);
2548                 inBounds.link(&m_jit);
2549             }
2550             value.adopt(scratch);
2551             valueGPR = scratchReg;
2552             break;
2553         }
2554 #endif // USE(JSVALUE64)
2555             
2556         case DoubleRepUse: {
2557             if (isClamped(type)) {
2558                 ASSERT(elementSize(type) == 1);
2559                 SpeculateDoubleOperand valueOp(this, valueUse);
2560                 GPRTemporary result(this);
2561                 FPRTemporary floatScratch(this);
2562                 FPRReg fpr = valueOp.fpr();
2563                 GPRReg gpr = result.gpr();
2564                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2565                 value.adopt(result);
2566                 valueGPR = gpr;
2567             } else {
2568                 SpeculateDoubleOperand valueOp(this, valueUse);
2569                 GPRTemporary result(this);
2570                 FPRReg fpr = valueOp.fpr();
2571                 GPRReg gpr = result.gpr();
2572                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2573                 m_jit.xorPtr(gpr, gpr);
2574                 MacroAssembler::Jump fixed = m_jit.jump();
2575                 notNaN.link(&m_jit);
2576                 
2577                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2578                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2579                 
2580                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2581                 
2582                 fixed.link(&m_jit);
2583                 value.adopt(result);
2584                 valueGPR = gpr;
2585             }
2586             break;
2587         }
2588             
2589         default:
2590             RELEASE_ASSERT_NOT_REACHED();
2591             break;
2592         }
2593     }
2594     
2595     ASSERT_UNUSED(valueGPR, valueGPR != property);
2596     ASSERT(valueGPR != base);
2597     ASSERT(valueGPR != storageReg);
2598     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2599     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2600         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2601         outOfBounds = MacroAssembler::Jump();
2602     }
2603
2604     switch (elementSize(type)) {
2605     case 1:
2606         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2607         break;
2608     case 2:
2609         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2610         break;
2611     case 4:
2612         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2613         break;
2614     default:
2615         CRASH();
2616     }
2617     if (outOfBounds.isSet())
2618         outOfBounds.link(&m_jit);
2619     noResult(node);
2620 }
2621
2622 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2623 {
2624     ASSERT(isFloat(type));
2625     
2626     SpeculateCellOperand base(this, node->child1());
2627     SpeculateStrictInt32Operand property(this, node->child2());
2628     StorageOperand storage(this, node->child3());
2629
2630     GPRReg baseReg = base.gpr();
2631     GPRReg propertyReg = property.gpr();
2632     GPRReg storageReg = storage.gpr();
2633
2634     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2635
2636     FPRTemporary result(this);
2637     FPRReg resultReg = result.fpr();
2638     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2639     switch (elementSize(type)) {
2640     case 4:
2641         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2642         m_jit.convertFloatToDouble(resultReg, resultReg);
2643         break;
2644     case 8: {
2645         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2646         break;
2647     }
2648     default:
2649         RELEASE_ASSERT_NOT_REACHED();
2650     }
2651     
2652     doubleResult(resultReg, node);
2653 }
2654
2655 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2656 {
2657     ASSERT(isFloat(type));
2658     
2659     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2660     GPRReg storageReg = storage.gpr();
2661     
2662     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2663     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2664
2665     SpeculateDoubleOperand valueOp(this, valueUse);
2666     FPRTemporary scratch(this);
2667     FPRReg valueFPR = valueOp.fpr();
2668     FPRReg scratchFPR = scratch.fpr();
2669
2670     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2671     
2672     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2673     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2674         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2675         outOfBounds = MacroAssembler::Jump();
2676     }
2677     
2678     switch (elementSize(type)) {
2679     case 4: {
2680         m_jit.moveDouble(valueFPR, scratchFPR);
2681         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2682         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2683         break;
2684     }
2685     case 8:
2686         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2687         break;
2688     default:
2689         RELEASE_ASSERT_NOT_REACHED();
2690     }
2691     if (outOfBounds.isSet())
2692         outOfBounds.link(&m_jit);
2693     noResult(node);
2694 }
2695
2696 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2697 {
2698     // Check that prototype is an object.
2699     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2700     
2701     // Initialize scratchReg with the value being checked.
2702     m_jit.move(valueReg, scratchReg);
2703     
2704     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
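    // The loop ends when the prototype slot no longer holds a cell: null on 64-bit, or a zero
    // payload on 32-bit.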
2705     MacroAssembler::Label loop(&m_jit);
2706     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2707     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2708     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2709 #if USE(JSVALUE64)
2710     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2711 #else
2712     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2713 #endif
2714     
2715     // No match, so the result is false.
2716 #if USE(JSVALUE64)
2717     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2718 #else
2719     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2720 #endif
2721     MacroAssembler::Jump putResult = m_jit.jump();
2722     
2723     isInstance.link(&m_jit);
2724 #if USE(JSVALUE64)
2725     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2726 #else
2727     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2728 #endif
2729     
2730     putResult.link(&m_jit);
2731 }
2732
2733 void SpeculativeJIT::compileInstanceOf(Node* node)
2734 {
2735     if (node->child1().useKind() == UntypedUse) {
2736         // The value might not be a cell, so speculate less aggressively. Alternatively, it might
2737         // only be used once (i.e. by us), in which case we would get zero benefit from speculating
2738         // any more aggressively than we absolutely need to.
2739         
2740         JSValueOperand value(this, node->child1());
2741         SpeculateCellOperand prototype(this, node->child2());
2742         GPRTemporary scratch(this);
2743         GPRTemporary scratch2(this);
2744         
2745         GPRReg prototypeReg = prototype.gpr();
2746         GPRReg scratchReg = scratch.gpr();
2747         GPRReg scratch2Reg = scratch2.gpr();
2748         
2749         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2750         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2751         moveFalseTo(scratchReg);
2752
2753         MacroAssembler::Jump done = m_jit.jump();
2754         
2755         isCell.link(&m_jit);
2756         
2757         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2758         
2759         done.link(&m_jit);
2760
2761         blessedBooleanResult(scratchReg, node);
2762         return;
2763     }
2764     
2765     SpeculateCellOperand value(this, node->child1());
2766     SpeculateCellOperand prototype(this, node->child2());
2767     
2768     GPRTemporary scratch(this);
2769     GPRTemporary scratch2(this);
2770     
2771     GPRReg valueReg = value.gpr();
2772     GPRReg prototypeReg = prototype.gpr();
2773     GPRReg scratchReg = scratch.gpr();
2774     GPRReg scratch2Reg = scratch2.gpr();
2775     
2776     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2777
2778     blessedBooleanResult(scratchReg, node);
2779 }
2780
2781 void SpeculativeJIT::compileAdd(Node* node)
2782 {
2783     switch (node->binaryUseKind()) {
2784     case Int32Use: {
2785         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2786         
2787         if (node->child1()->isInt32Constant()) {
2788             int32_t imm1 = node->child1()->asInt32();
2789             SpeculateInt32Operand op2(this, node->child2());
2790             GPRTemporary result(this);
2791
2792             if (!shouldCheckOverflow(node->arithMode())) {
2793                 m_jit.move(op2.gpr(), result.gpr());
2794                 m_jit.add32(Imm32(imm1), result.gpr());
2795             } else
2796                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2797
2798             int32Result(result.gpr(), node);
2799             return;
2800         }
2801         
2802         if (node->child2()->isInt32Constant()) {
2803             SpeculateInt32Operand op1(this, node->child1());
2804             int32_t imm2 = node->child2()->asInt32();
2805             GPRTemporary result(this);
2806                 
2807             if (!shouldCheckOverflow(node->arithMode())) {
2808                 m_jit.move(op1.gpr(), result.gpr());
2809                 m_jit.add32(Imm32(imm2), result.gpr());
2810             } else
2811                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2812
2813             int32Result(result.gpr(), node);
2814             return;
2815         }
2816                 
2817         SpeculateInt32Operand op1(this, node->child1());
2818         SpeculateInt32Operand op2(this, node->child2());
2819         GPRTemporary result(this, Reuse, op1, op2);
2820
2821         GPRReg gpr1 = op1.gpr();
2822         GPRReg gpr2 = op2.gpr();
2823         GPRReg gprResult = result.gpr();
2824
2825         if (!shouldCheckOverflow(node->arithMode())) {
2826             if (gpr1 == gprResult)
2827                 m_jit.add32(gpr2, gprResult);
2828             else {
2829                 m_jit.move(gpr2, gprResult);
2830                 m_jit.add32(gpr1, gprResult);
2831             }
2832         } else {
2833             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2834                 
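            // If the checked add reused one of the operand registers as its destination,
            // the original operand value is gone by the time the overflow check fails.
            // The SpeculationRecovery(SpeculativeAdd, dest, src) entries below give the
            // OSR exit machinery enough information to undo the add (dest -= src) and
            // reconstruct the pre-add operand before exiting.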
2835             if (gpr1 == gprResult)
2836                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2837             else if (gpr2 == gprResult)
2838                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2839             else
2840                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2841         }
2842
2843         int32Result(gprResult, node);
2844         return;
2845     }
2846         
2847 #if USE(JSVALUE64)
2848     case Int52RepUse: {
2849         ASSERT(shouldCheckOverflow(node->arithMode()));
2850         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2851
2852         // Will we need an overflow check? If we can prove that neither input can be
2853         // Int52 then the overflow check will not be necessary.
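        // (If both inputs are known to fit in int32 -- SpecInt52 here denotes the
        // int52-but-not-int32 part of the type lattice -- then their sum needs at most
        // 33 bits, far below 52, so the unchecked add64 below cannot overflow.)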
2854         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2855             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2856             SpeculateWhicheverInt52Operand op1(this, node->child1());
2857             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2858             GPRTemporary result(this, Reuse, op1);
2859             m_jit.move(op1.gpr(), result.gpr());
2860             m_jit.add64(op2.gpr(), result.gpr());
2861             int52Result(result.gpr(), node, op1.format());
2862             return;
2863         }
2864         
2865         SpeculateInt52Operand op1(this, node->child1());
2866         SpeculateInt52Operand op2(this, node->child2());
2867         GPRTemporary result(this);
2868         m_jit.move(op1.gpr(), result.gpr());
2869         speculationCheck(
2870             Int52Overflow, JSValueRegs(), 0,
2871             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2872         int52Result(result.gpr(), node);
2873         return;
2874     }
2875 #endif // USE(JSVALUE64)
2876     
2877     case DoubleRepUse: {
2878         SpeculateDoubleOperand op1(this, node->child1());
2879         SpeculateDoubleOperand op2(this, node->child2());
2880         FPRTemporary result(this, op1, op2);
2881
2882         FPRReg reg1 = op1.fpr();
2883         FPRReg reg2 = op2.fpr();
2884         m_jit.addDouble(reg1, reg2, result.fpr());
2885
2886         doubleResult(result.fpr(), node);
2887         return;
2888     }
2889         
2890     default:
2891         RELEASE_ASSERT_NOT_REACHED();
2892         break;
2893     }
2894 }
2895
2896 void SpeculativeJIT::compileMakeRope(Node* node)
2897 {
2898     ASSERT(node->child1().useKind() == KnownStringUse);
2899     ASSERT(node->child2().useKind() == KnownStringUse);
2900     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2901     
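    // A rope is a JSString that defers concatenation: instead of copying characters, it
    // stores up to s_maxInternalRopeLength fiber pointers to its substrings, plus the
    // combined length and flags of the eventual string. The code below inlines the common
    // case -- allocate the rope cell, fill in the fibers, accumulate length and the
    // Is8Bit flag -- and falls back to operationMakeRope2/3 if the inline allocation fails.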
2902     SpeculateCellOperand op1(this, node->child1());
2903     SpeculateCellOperand op2(this, node->child2());
2904     SpeculateCellOperand op3(this, node->child3());
2905     GPRTemporary result(this);
2906     GPRTemporary allocator(this);
2907     GPRTemporary scratch(this);
2908     
2909     GPRReg opGPRs[3];
2910     unsigned numOpGPRs;
2911     opGPRs[0] = op1.gpr();
2912     opGPRs[1] = op2.gpr();
2913     if (node->child3()) {
2914         opGPRs[2] = op3.gpr();
2915         numOpGPRs = 3;
2916     } else {
2917         opGPRs[2] = InvalidGPRReg;
2918         numOpGPRs = 2;
2919     }
2920     GPRReg resultGPR = result.gpr();
2921     GPRReg allocatorGPR = allocator.gpr();
2922     GPRReg scratchGPR = scratch.gpr();
2923     
2924     JITCompiler::JumpList slowPath;
2925     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2926     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2927     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2928         
2929     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2930     for (unsigned i = 0; i < numOpGPRs; ++i)
2931         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2932     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2933         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2934     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2935     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
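    // The allocator register is no longer needed once the cell has been allocated, so it
    // is reused as the running length accumulator, while scratchGPR accumulates the flags;
    // and-ing the fibers' flags together means Is8Bit stays set only if every fiber is
    // 8-bit.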
2936     if (!ASSERT_DISABLED) {
2937         JITCompiler::Jump ok = m_jit.branch32(
2938             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2939         m_jit.abortWithReason(DFGNegativeStringLength);
2940         ok.link(&m_jit);
2941     }
2942     for (unsigned i = 1; i < numOpGPRs; ++i) {
2943         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2944         speculationCheck(
2945             Uncountable, JSValueSource(), nullptr,
2946             m_jit.branchAdd32(
2947                 JITCompiler::Overflow,
2948                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2949     }
2950     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2951     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2952     if (!ASSERT_DISABLED) {
2953         JITCompiler::Jump ok = m_jit.branch32(
2954             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2955         m_jit.abortWithReason(DFGNegativeStringLength);
2956         ok.link(&m_jit);
2957     }
2958     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2959     
2960     switch (numOpGPRs) {
2961     case 2:
2962         addSlowPathGenerator(slowPathCall(
2963             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2964         break;
2965     case 3:
2966         addSlowPathGenerator(slowPathCall(
2967             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2968         break;
2969     default:
2970         RELEASE_ASSERT_NOT_REACHED();
2971         break;
2972     }
2973         
2974     cellResult(resultGPR, node);
2975 }
2976
2977 void SpeculativeJIT::compileArithClz32(Node* node)
2978 {
2979     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2980     SpeculateInt32Operand value(this, node->child1());
2981     GPRTemporary result(this, Reuse, value);
2982     GPRReg valueReg = value.gpr();
2983     GPRReg resultReg = result.gpr();
2984     m_jit.countLeadingZeros32(valueReg, resultReg);
2985     int32Result(resultReg, node);
2986 }
2987
2988 void SpeculativeJIT::compileArithSub(Node* node)
2989 {
2990     switch (node->binaryUseKind()) {
2991     case Int32Use: {
2992         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2993         
2994         if (node->child2()->isInt32Constant()) {
2995             SpeculateInt32Operand op1(this, node->child1());
2996             int32_t imm2 = node->child2()->asInt32();
2997             GPRTemporary result(this);
2998
2999             if (!shouldCheckOverflow(node->arithMode())) {
3000                 m_jit.move(op1.gpr(), result.gpr());
3001                 m_jit.sub32(Imm32(imm2), result.gpr());
3002             } else {
3003                 GPRTemporary scratch(this);
3004                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3005             }
3006
3007             int32Result(result.gpr(), node);
3008             return;
3009         }
3010             
3011         if (node->child1()->isInt32Constant()) {
3012             int32_t imm1 = node->child1()->asInt32();
3013             SpeculateInt32Operand op2(this, node->child2());
3014             GPRTemporary result(this);
3015                 
3016             m_jit.move(Imm32(imm1), result.gpr());
3017             if (!shouldCheckOverflow(node->arithMode()))
3018                 m_jit.sub32(op2.gpr(), result.gpr());
3019             else
3020                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3021                 
3022             int32Result(result.gpr(), node);
3023             return;
3024         }
3025             
3026         SpeculateInt32Operand op1(this, node->child1());
3027         SpeculateInt32Operand op2(this, node->child2());
3028         GPRTemporary result(this);
3029
3030         if (!shouldCheckOverflow(node->arithMode())) {
3031             m_jit.move(op1.gpr(), result.gpr());
3032             m_jit.sub32(op2.gpr(), result.gpr());
3033         } else
3034             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3035
3036         int32Result(result.gpr(), node);
3037         return;
3038     }
3039         
3040 #if USE(JSVALUE64)
3041     case Int52RepUse: {
3042         ASSERT(shouldCheckOverflow(node->arithMode()));
3043         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3044
3045         // Will we need an overflow check? If we can prove that neither input can be
3046         // Int52 then the overflow check will not be necessary.
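        // (Same reasoning as in compileAdd above: if both inputs are known to fit in
        // int32, their difference needs at most 33 bits, so the unchecked sub64 below
        // cannot overflow the 52-bit range.)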
3047         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3048             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3049             SpeculateWhicheverInt52Operand op1(this, node->child1());
3050             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3051             GPRTemporary result(this, Reuse, op1);
3052             m_jit.move(op1.gpr(), result.gpr());
3053             m_jit.sub64(op2.gpr(), result.gpr());
3054             int52Result(result.gpr(), node, op1.format());
3055             return;
3056         }
3057         
3058         SpeculateInt52Operand op1(this, node->child1());
3059         SpeculateInt52Operand op2(this, node->child2());
3060         GPRTemporary result(this);
3061         m_jit.move(op1.gpr(), result.gpr());
3062         speculationCheck(
3063             Int52Overflow, JSValueRegs(), 0,
3064             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3065         int52Result(result.gpr(), node);
3066         return;
3067     }
3068 #endif // USE(JSVALUE64)
3069
3070     case DoubleRepUse: {
3071         SpeculateDoubleOperand op1(this, node->child1());
3072         SpeculateDoubleOperand op2(this, node->child2());
3073         FPRTemporary result(this, op1);
3074
3075         FPRReg reg1 = op1.fpr();
3076         FPRReg reg2 = op2.fpr();
3077         m_jit.subDouble(reg1, reg2, result.fpr());
3078
3079         doubleResult(result.fpr(), node);
3080         return;
3081     }
3082         
3083     default:
3084         RELEASE_ASSERT_NOT_REACHED();
3085         return;
3086     }
3087 }
3088
3089 void SpeculativeJIT::compileArithNegate(Node* node)
3090 {
3091     switch (node->child1().useKind()) {
3092     case Int32Use: {
3093         SpeculateInt32Operand op1(this, node->child1());
3094         GPRTemporary result(this);
3095
3096         m_jit.move(op1.gpr(), result.gpr());
3097
3098         // Note: there is no mode in which the result is not used as a number
3099         // but the user still cares about negative zero.
3100         
3101         if (!shouldCheckOverflow(node->arithMode()))
3102             m_jit.neg32(result.gpr());
3103         else if (!shouldCheckNegativeZero(node->arithMode()))
3104             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3105         else {
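            // Masking with 0x7fffffff lets a single test cover both hazards: the masked
            // value is zero exactly when the operand is 0 (whose negation is -0) or
            // 0x80000000 == INT32_MIN (whose negation overflows int32).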
3106             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3107             m_jit.neg32(result.gpr());
3108         }
3109
3110         int32Result(result.gpr(), node);
3111         return;
3112     }
3113
3114 #if USE(JSVALUE64)
3115     case Int52RepUse: {
3116         ASSERT(shouldCheckOverflow(node->arithMode()));
3117         
3118         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3119             SpeculateWhicheverInt52Operand op1(this, node->child1());
3120             GPRTemporary result(this);
3121             GPRReg op1GPR = op1.gpr();
3122             GPRReg resultGPR = result.gpr();
3123             m_jit.move(op1GPR, resultGPR);
3124             m_jit.neg64(resultGPR);
3125             if (shouldCheckNegativeZero(node->arithMode())) {
3126                 speculationCheck(
3127                     NegativeZero, JSValueRegs(), 0,
3128                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3129             }
3130             int52Result(resultGPR, node, op1.format());
3131             return;
3132         }
3133         
3134         SpeculateInt52Operand op1(this, node->child1());
3135         GPRTemporary result(this);
3136         GPRReg op1GPR = op1.gpr();
3137         GPRReg resultGPR = result.gpr();
3138         m_jit.move(op1GPR, resultGPR);
3139         speculationCheck(
3140             Int52Overflow, JSValueRegs(), 0,
3141             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3142         if (shouldCheckNegativeZero(node->arithMode())) {
3143             speculationCheck(
3144                 NegativeZero, JSValueRegs(), 0,
3145                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3146         }
3147         int52Result(resultGPR, node);
3148         return;
3149     }
3150 #endif // USE(JSVALUE64)
3151         
3152     case DoubleRepUse: {
3153         SpeculateDoubleOperand op1(this, node->child1());
3154         FPRTemporary result(this);
3155         
3156         m_jit.negateDouble(op1.fpr(), result.fpr());
3157         
3158         doubleResult(result.fpr(), node);
3159         return;
3160     }
3161         
3162     default:
3163         RELEASE_ASSERT_NOT_REACHED();
3164         return;
3165     }
3166 }

3167 void SpeculativeJIT::compileArithMul(Node* node)
3168 {
3169     switch (node->binaryUseKind()) {
3170     case Int32Use: {
3171         SpeculateInt32Operand op1(this, node->child1());
3172         SpeculateInt32Operand op2(this, node->child2());
3173         GPRTemporary result(this);
3174
3175         GPRReg reg1 = op1.gpr();
3176         GPRReg reg2 = op2.gpr();
3177
3178         // We can perform truncated multiplications if we get to this point, because if the
3179         // fixup phase could not prove that it would be safe, it would have turned us into
3180         // a double multiplication.
3181         if (!shouldCheckOverflow(node->arithMode())) {
3182             m_jit.move(reg1, result.gpr());
3183             m_jit.mul32(reg2, result.gpr());
3184         } else {
3185             speculationCheck(
3186                 Overflow, JSValueRegs(), 0,
3187                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3188         }
3189             
3190         // Check for negative zero, if the users of this node care about such things.
3191         if (shouldCheckNegativeZero(node->arithMode())) {
3192             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
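            // A zero product only needs to be -0 if one of the factors was negative
            // (e.g. -1 * 0), and the int32 result cannot represent -0, so OSR-exit
            // whenever the result is 0 and either input is negative.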
3193             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3194             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3195             resultNonZero.link(&m_jit);
3196         }
3197
3198         int32Result(result.gpr(), node);
3199         return;
3200     }
3201     
3202 #if USE(JSVALUE64)   
3203     case Int52RepUse: {
3204         ASSERT(shouldCheckOverflow(node->arithMode()));
3205         
3206         // This is super clever. We want to do an int52 multiplication and check the
3207         // int52 overflow bit. There is no direct hardware support for this, but we do
3208         // have the ability to do an int64 multiplication and check the int64 overflow
3209         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3210         // registers, with the high 12 bits being sign-extended. We can do:
3211         //
3212         //     (a * (b << 12))
3213         //
3214         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3215         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3216         // multiplication overflows is identical to whether the 'a * b' 52-bit
3217         // multiplication overflows.
3218         //
3219         // In our nomenclature, this is:
3220         //
3221         //     strictInt52(a) * int52(b) => int52
3222         //
3223         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3224         // bits.
3225         //
3226         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3227         // we just do whatever is more convenient for op1 and have op2 do the
3228         // opposite. This ensures that we do at most one shift.
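        //
        // A small worked example (hand-computed): with a = 3 and b = 5, the shifted
        // operand is 5 << 12 = 20480, and 3 * 20480 = 61440 = (3 * 5) << 12 -- exactly
        // the product in the shifted int52 format. The int64 multiply overflows
        // precisely when a * b would not fit in 52 bits.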
3229
3230         SpeculateWhicheverInt52Operand op1(this, node->child1());
3231         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3232         GPRTemporary result(this);
3233         
3234         GPRReg op1GPR = op1.gpr();
3235         GPRReg op2GPR = op2.gpr();
3236         GPRReg resultGPR = result.gpr();
3237         
3238         m_jit.move(op1GPR, resultGPR);
3239         speculationCheck(
3240             Int52Overflow, JSValueRegs(), 0,
3241             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3242         
3243         if (shouldCheckNegativeZero(node->arithMode())) {
3244             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3245                 MacroAssembler::NonZero, resultGPR);
3246             speculationCheck(
3247                 NegativeZero, JSValueRegs(), 0,
3248                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3249             speculationCheck(
3250                 NegativeZero, JSValueRegs(), 0,
3251                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3252             resultNonZero.link(&m_jit);
3253         }
3254         
3255         int52Result(resultGPR, node);
3256         return;
3257     }
3258 #endif // USE(JSVALUE64)
3259         
3260     case DoubleRepUse: {
3261         SpeculateDoubleOperand op1(this, node->child1());
3262         SpeculateDoubleOperand op2(this, node->child2());
3263         FPRTemporary result(this, op1, op2);
3264         
3265         FPRReg reg1 = op1.fpr();
3266         FPRReg reg2 = op2.fpr();
3267         
3268         m_jit.mulDouble(reg1, reg2, result.fpr());
3269         
3270         doubleResult(result.fpr(), node);
3271         return;
3272     }
3273         
3274     default:
3275         RELEASE_ASSERT_NOT_REACHED();
3276         return;
3277     }
3278 }
3279
3280 void SpeculativeJIT::compileArithDiv(Node* node)
3281 {
3282     switch (node->binaryUseKind()) {
3283     case Int32Use: {
3284 #if CPU(X86) || CPU(X86_64)
3285         SpeculateInt32Operand op1(this, node->child1());
3286         SpeculateInt32Operand op2(this, node->child2());
3287         GPRTemporary eax(this, X86Registers::eax);
3288         GPRTemporary edx(this, X86Registers::edx);
3289         GPRReg op1GPR = op1.gpr();
3290         GPRReg op2GPR = op2.gpr();
3291     
3292         GPRReg op2TempGPR;
3293         GPRReg temp;
3294         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3295             op2TempGPR = allocate();
3296             temp = op2TempGPR;
3297         } else {
3298             op2TempGPR = InvalidGPRReg;
3299             if (op1GPR == X86Registers::eax)
3300                 temp = X86Registers::edx;
3301             else
3302                 temp = X86Registers::eax;
3303         }
3304     
3305         ASSERT(temp != op1GPR);
3306         ASSERT(temp != op2GPR);
3307     
3308         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3309     
3310         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
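        // The "denominator + 1, unsigned compare against 1" trick filters both bad
        // denominators with one branch: 0 becomes 1 and -1 becomes 0, so only those two
        // values fail the unsigned Above-1 test and fall through to the special handling
        // below.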
3311     
3312         JITCompiler::JumpList done;
3313         if (shouldCheckOverflow(node->arithMode())) {
3314             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3315             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3316         } else {
3317             // This is the case where we convert the result to an int after we're done, and we
3318             // already know that the denominator is either -1 or 0. So, if the denominator is
3319             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3320             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3321             // are happy to fall through to a normal division, since we're just dividing
3322             // something by negative 1.
3323         
3324             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3325             m_jit.move(TrustedImm32(0), eax.gpr());
3326             done.append(m_jit.jump());
3327         
3328             notZero.link(&m_jit);
3329             JITCompiler::Jump notNeg2ToThe31 =
3330                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3331             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3332             done.append(m_jit.jump());
3333         
3334             notNeg2ToThe31.link(&m_jit);
3335         }
3336     
3337         safeDenominator.link(&m_jit);
3338     
3339         // If the user cares about negative zero, then speculate that we're not about
3340         // to produce negative zero.
3341         if (shouldCheckNegativeZero(node->arithMode())) {
3342             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3343             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3344             numeratorNonZero.link(&m_jit);
3345         }
3346     
3347         if (op2TempGPR != InvalidGPRReg) {
3348             m_jit.move(op2GPR, op2TempGPR);
3349             op2GPR = op2TempGPR;
3350         }
3351             
3352         m_jit.move(op1GPR, eax.gpr());
3353         m_jit.assembler().cdq();
3354         m_jit.assembler().idivl_r(op2GPR);
3355             
3356         if (op2TempGPR != InvalidGPRReg)
3357             unlock(op2TempGPR);
3358
3359         // Check that there was no remainder. If there had been, then we'd be obligated to
3360         // produce a double result instead.
3361         if (shouldCheckOverflow(node->arithMode()))
3362             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3363         
3364         done.link(&m_jit);
3365         int32Result(eax.gpr(), node);
3366 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3367         SpeculateInt32Operand op1(this, node->child1());
3368         SpeculateInt32Operand op2(this, node->child2());
3369         GPRReg op1GPR = op1.gpr();
3370         GPRReg op2GPR = op2.gpr();
3371         GPRTemporary quotient(this);
3372         GPRTemporary multiplyAnswer(this);
3373
3374         // If the user cares about negative zero, then speculate that we're not about
3375         // to produce negative zero.
3376         if (shouldCheckNegativeZero(node->arithMode())) {
3377             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3378             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3379             numeratorNonZero.link(&m_jit);
3380         }
3381
3382         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3383
3384         // Check that there was no remainder. If there had been, then we'd be obligated to
3385         // produce a double result instead.
3386         if (shouldCheckOverflow(node->arithMode())) {
3387             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3388             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3389         }
3390
3391         int32Result(quotient.gpr(), node);
3392 #else
3393         RELEASE_ASSERT_NOT_REACHED();
3394 #endif
3395         break;
3396     }
3397         
3398     case DoubleRepUse: {
3399         SpeculateDoubleOperand op1(this, node->child1());
3400         SpeculateDoubleOperand op2(this, node->child2());
3401         FPRTemporary result(this, op1);
3402         
3403         FPRReg reg1 = op1.fpr();
3404         FPRReg reg2 = op2.fpr();
3405         m_jit.divDouble(reg1, reg2, result.fpr());
3406         
3407         doubleResult(result.fpr(), node);
3408         break;
3409     }
3410         
3411     default:
3412         RELEASE_ASSERT_NOT_REACHED();
3413         break;
3414     }
3415 }
3416
3417 void SpeculativeJIT::compileArithMod(Node* node)
3418 {
3419     switch (node->binaryUseKind()) {
3420     case Int32Use: {
3421         // In the fast path, the dividend value could be the final result
3422         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3423         SpeculateStrictInt32Operand op1(this, node->child1());
3424         
3425         if (node->child2()->isInt32Constant()) {
3426             int32_t divisor = node->child2()->asInt32();
3427             if (divisor > 1 && hasOneBitSet(divisor)) {
3428                 unsigned logarithm = WTF::fastLog2(divisor);
3429                 GPRReg dividendGPR = op1.gpr();
3430                 GPRTemporary result(this);
3431                 GPRReg resultGPR = result.gpr();
3432
3433                 // This is what LLVM generates. It's pretty crazy. Here's my
3434                 // attempt at understanding it.
3435                 
3436                 // First, compute either divisor - 1, or 0, depending on whether
3437                 // the dividend is negative:
3438                 //
3439                 // If dividend < 0:  resultGPR = divisor - 1
3440                 // If dividend >= 0: resultGPR = 0
3441                 m_jit.move(dividendGPR, resultGPR);
3442                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3443                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3444                 
3445                 // Add in the dividend, so that:
3446                 //
3447                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3448                 // If dividend >= 0: resultGPR = dividend
3449                 m_jit.add32(dividendGPR, resultGPR);
3450                 
3451                 // Mask so as to only get the *high* bits. This rounds down
3452                 // (towards negative infinity) resultGPR to the nearest multiple
3453                 // of divisor, so that:
3454                 //
3455                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3456                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3457                 //
3458                 // Note that this can be simplified to:
3459                 //
3460                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3461                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3462                 //
3463                 // Note that if the dividend is negative, resultGPR will also be negative.
3464                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3465                 // zero, because of how things are conditionalized.
3466                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3467                 
3468                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3469                 //
3470                 // resultGPR = dividendGPR - resultGPR
3471                 m_jit.neg32(resultGPR);
3472                 m_jit.add32(dividendGPR, resultGPR);
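                // Hand-worked example with divisor = 8 (logarithm = 3), dividend = -13:
                // -13 >> 31 = -1, then >>> 29 gives 7; 7 + (-13) = -6; -6 & -8 = -8;
                // negate and add the dividend: 8 + (-13) = -5, and indeed -13 % 8 == -5
                // (the remainder takes the sign of the dividend, as JS requires).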
3473                 
3474                 if (shouldCheckNegativeZero(node->arithMode())) {
3475                     // Check that we're not about to create negative zero.
3476                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3477                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3478                     numeratorPositive.link(&m_jit);
3479                 }
3480
3481                 int32Result(resultGPR, node);
3482                 return;
3483             }
3484         }
3485         
3486 #if CPU(X86) || CPU(X86_64)
3487         if (node->child2()->isInt32Constant()) {
3488             int32_t divisor = node->child2()->asInt32();
3489             if (divisor && divisor != -1) {
3490                 GPRReg op1Gpr = op1.gpr();
3491
3492                 GPRTemporary eax(this, X86Registers::eax);
3493                 GPRTemporary edx(this, X86Registers::edx);
3494                 GPRTemporary scratch(this);
3495                 GPRReg scratchGPR = scratch.gpr();
3496
3497                 GPRReg op1SaveGPR;
3498                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3499                     op1SaveGPR = allocate();
3500                     ASSERT(op1Gpr != op1SaveGPR);
3501                     m_jit.move(op1Gpr, op1SaveGPR);
3502                 } else
3503                     op1SaveGPR = op1Gpr;
3504                 ASSERT(op1SaveGPR != X86Registers::eax);
3505                 ASSERT(op1SaveGPR != X86Registers::edx);
3506
3507                 m_jit.move(op1Gpr, eax.gpr());
3508                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3509                 m_jit.assembler().cdq();
3510                 m_jit.assembler().idivl_r(scratchGPR);
3511                 if (shouldCheckNegativeZero(node->arithMode())) {
3512                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3513                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3514                     numeratorPositive.link(&m_jit);
3515                 }
3516             
3517                 if (op1SaveGPR != op1Gpr)
3518                     unlock(op1SaveGPR);
3519
3520                 int32Result(edx.gpr(), node);
3521                 return;
3522             }
3523         }
3524 #endif
3525
3526         SpeculateInt32Operand op2(this, node->child2());
3527 #if CPU(X86) || CPU(X86_64)
3528         GPRTemporary eax(this, X86Registers::eax);
3529         GPRTemporary edx(this, X86Registers::edx);
3530         GPRReg op1GPR = op1.gpr();
3531         GPRReg op2GPR = op2.gpr();
3532     
3533         GPRReg op2TempGPR;
3534         GPRReg temp;
3535         GPRReg op1SaveGPR;
3536     
3537         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3538             op2TempGPR = allocate();
3539             temp = op2TempGPR;
3540         } else {
3541             op2TempGPR = InvalidGPRReg;
3542             if (op1GPR == X86Registers::eax)
3543                 temp = X86Registers::edx;
3544             else
3545                 temp = X86Registers::eax;
3546         }
3547     
3548         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3549             op1SaveGPR = allocate();
3550             ASSERT(op1GPR != op1SaveGPR);
3551             m_jit.move(op1GPR, op1SaveGPR);
3552         } else
3553             op1SaveGPR = op1GPR;
3554     
3555         ASSERT(temp != op1GPR);
3556         ASSERT(temp != op2GPR);
3557         ASSERT(op1SaveGPR != X86Registers::eax);
3558         ASSERT(op1SaveGPR != X86Registers::edx);
3559     
3560         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3561     
3562         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
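        // Same "denominator + 1" trick as in compileArithDiv above: only denominators of
        // 0 and -1 fail the unsigned Above-1 test and need the special handling below.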
3563     
3564         JITCompiler::JumpList done;
3565         
3566         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3567         // separate case for that. But it probably doesn't matter so much.
3568         if (shouldCheckOverflow(node->arithMode())) {
3569             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3570             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3571         } else {
3572             // This is the case where we convert the result to an int after we're done, and we
3573             // already know that the denominator is either -1 or 0. So, if the denominator is
3574             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3575             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3576             // happy to fall through to a normal division, since we're just dividing something
3577             // by negative 1.
3578         
3579             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3580             m_jit.move(TrustedImm32(0), edx.gpr());
3581             done.append(m_jit.jump());
3582         
3583             notZero.link(&m_jit);
3584             JITCompiler::Jump notNeg2ToThe31 =
3585                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3586             m_jit.move(TrustedImm32(0), edx.gpr());
3587             done.append(m_jit.jump());
3588         
3589             notNeg2ToThe31.link(&m_jit);
3590         }
3591         
3592         safeDenominator.link(&m_jit);
3593             
3594         if (op2TempGPR != InvalidGPRReg) {
3595             m_jit.move(op2GPR, op2TempGPR);
3596             op2GPR = op2TempGPR;
3597         }
3598             
3599         m_jit.move(op1GPR, eax.gpr());
3600         m_jit.assembler().cdq();
3601         m_jit.assembler().idivl_r(op2GPR);
3602             
3603         if (op2TempGPR != InvalidGPRReg)
3604             unlock(op2TempGPR);
3605
3606         // Check that we're not about to create negative zero.
3607         if (shouldCheckNegativeZero(node->arithMode())) {
3608             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3609             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3610             numeratorPositive.link(&m_jit);
3611         }
3612     
3613         if (op1SaveGPR != op1GPR)
3614             unlock(op1SaveGPR);
3615             
3616         done.link(&m_jit);
3617         int32Result(edx.gpr(), node);
3618
3619 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3620         GPRTemporary temp(this);
3621         GPRTemporary quotientThenRemainder(this);
3622         GPRTemporary multiplyAnswer(this);
3623         GPRReg dividendGPR = op1.gpr();
3624         GPRReg divisorGPR = op2.gpr();
3625         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3626         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3627
3628         JITCompiler::JumpList done;
3629     
3630         if (shouldCheckOverflow(node->arithMode()))
3631             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3632         else {
3633             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3634             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3635             done.append(m_jit.jump());
3636             denominatorNotZero.link(&m_jit);
3637         }
3638
3639         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3640         // FIXME: It seems like there are cases where we don't need this? What if we have
3641         // arithMode() == Arith::Unchecked?
3642         // https://bugs.webkit.org/show_bug.cgi?id=126444
3643         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3644 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3645         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3646 #else
3647         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3648 #endif
3649
3650         // If the user cares about negative zero, then speculate that we're not about
3651         // to produce negative zero.
3652         if (shouldCheckNegativeZero(node->arithMode())) {
3653             // Check that we're not about to create negative zero.
3654             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3655             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3656             numeratorPositive.link(&m_jit);
3657         }
3658
3659         done.link(&m_jit);
3660
3661         int32Result(quotientThenRemainderGPR, node);
3662 #else // not architecture that can do integer division
3663         RELEASE_ASSERT_NOT_REACHED();
3664 #endif
3665         return;
3666     }
3667         
3668     case DoubleRepUse: {
3669         SpeculateDoubleOperand op1(this, node->child1());
3670         SpeculateDoubleOperand op2(this, node->child2());
3671         
3672         FPRReg op1FPR = op1.fpr();
3673         FPRReg op2FPR = op2.fpr();
3674         
3675         flushRegisters();
3676         
3677         FPRResult result(this);
3678         
3679         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3680         
3681         doubleResult(result.fpr(), node);
3682         return;
3683     }
3684         
3685     default:
3686         RELEASE_ASSERT_NOT_REACHED();
3687         return;
3688     }
3689 }
3690
3691 void SpeculativeJIT::compileArithRound(Node* node)
3692 {
3693     ASSERT(node->child1().useKind() == DoubleRepUse);
3694
3695     SpeculateDoubleOperand value(this, node->child1());
3696     FPRReg valueFPR = value.fpr();
3697
3698     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3699         FPRTemporary oneHalf(this);
3700         GPRTemporary roundedResultAsInt32(this);
3701         FPRReg oneHalfFPR = oneHalf.fpr();
3702         GPRReg resultGPR = roundedResultAsInt32.gpr();
3703
3704         static const double halfConstant = 0.5;
3705         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
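        // Fast path: compute value + 0.5 and truncate to int32. If the truncation fails
        // (the rounded value is not representable as an int32), OSR-exit via the
        // speculation check below.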
3706         m_jit.addDouble(valueFPR, oneHalfFPR);
3707
3708         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3709         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3710         int32Result(resultGPR, node);
3711         return;
3712     }
3713
3714     flushRegisters();
3715     FPRResult roundedResultAsDouble(this);
3716     FPRReg resultFPR = roundedResultAsDouble.fpr();
3717     callOperation(jsRound, resultFPR, valueFPR);
3718     m_jit.exceptionCheck();
3719     if (producesInteger(node->arithRoundingMode())) {
3720         GPRTemporary roundedResultAsInt32(this);
3721         FPRTemporary scratch(this);
3722         FPRReg scratchFPR = scratch.fpr();
3723         GPRReg resultGPR = roundedResultAsInt32.gpr();
3724         JITCompiler::JumpList failureCases;
3725         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3726         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3727
3728         int32Result(resultGPR, node);
3729     } else
3730         doubleResult(resultFPR, node);
3731 }
3732
3733 void SpeculativeJIT::compileArithSqrt(Node* node)
3734 {
3735     SpeculateDoubleOperand op1(this, node->child1());
3736     FPRReg op1FPR = op1.fpr();
3737
3738     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3739         flushRegisters();
3740         FPRResult result(this);
3741         callOperation(sqrt, result.fpr(), op1FPR);
3742         doubleResult(result.fpr(), node);
3743     } else {
3744         FPRTemporary result(this, op1);
3745         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3746         doubleResult(result.fpr(), node);
3747     }
3748 }
3749
3750 // For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
3751 // Every register is clobbered by this helper.
3752 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3753 {
3754     MacroAssembler::JumpList skipFastPath;
3755     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3756     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3757
3758     static const double oneConstant = 1.0;
3759     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3760
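    // The loop below is standard binary exponentiation (square-and-multiply): on each
    // iteration, multiply the accumulator by the current base if the low bit of the
    // exponent is set, square the base, and shift the exponent right. For example, with
    // y = 6 (binary 110) the accumulator picks up x^2 and then x^4, yielding x^6 after
    // three iterations.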
3761     MacroAssembler::Label startLoop(assembler.label());
3762     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3763     assembler.mulDouble(xOperand, result);
3764     exponentIsEven.link(&assembler);
3765     assembler.mulDouble(xOperand, xOperand);
3766     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3767     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3768
3769     MacroAssembler::Jump skipSlowPath = assembler.jump();
3770     skipFastPath.link(&assembler);
3771
3772     return skipSlowPath;
3773 }
3774
3775 void SpeculativeJIT::compileArithPow(Node* node)
3776 {
3777     if (node->child2().useKind() == Int32Use) {
3778         SpeculateDoubleOperand xOperand(this, node->child1());
3779         SpeculateInt32Operand yOperand(this, node->child2());
3780         FPRReg xOperandfpr = xOperand.fpr();
3781         GPRReg yOperandGpr = yOperand.gpr();
3782         FPRTemporary yOperandfpr(this);
3783
3784         flushRegisters();
3785
3786         FPRResult result(this);
3787         FPRReg resultFpr = result.fpr();
3788
3789         FPRTemporary xOperandCopy(this);
3790         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3791         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3792
3793         GPRTemporary counter(this);
3794         GPRReg counterGpr = counter.gpr();
3795         m_jit.move(yOperandGpr, counterGpr);