1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSArrowFunction.h"
42 #include "JSCInlines.h"
43 #include "JSEnvironmentRecord.h"
44 #include "JSLexicalEnvironment.h"
45 #include "LinkBuffer.h"
46 #include "ScopedArguments.h"
47 #include "ScratchRegisterAllocator.h"
48 #include "WriteBarrierBuffer.h"
49 #include <wtf/MathExtras.h>
50
51 namespace JSC { namespace DFG {
52
53 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
54     : m_compileOkay(true)
55     , m_jit(jit)
56     , m_currentNode(0)
57     , m_lastGeneratedNode(LastNodeType)
58     , m_indexInBlock(0)
59     , m_generationInfo(m_jit.graph().frameRegisterCount())
60     , m_state(m_jit.graph())
61     , m_interpreter(m_jit.graph(), m_state)
62     , m_stream(&jit.jitCode()->variableEventStream)
63     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
64 {
65 }
66
67 SpeculativeJIT::~SpeculativeJIT()
68 {
69 }
70
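// Emits the inline fast path for allocating a JSArray with the given structure and
// public length: the butterfly (out-of-line storage) is bump-allocated first, then the
// JSArray cell itself via emitAllocateJSObject. For double arrays, any slots beyond
// numElements are pre-filled with PNaN so unwritten entries read as holes. Any
// allocation failure falls through to the CallArrayAllocatorSlowPathGenerator below.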
71 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
72 {
73     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
74     
75     GPRTemporary scratch(this);
76     GPRTemporary scratch2(this);
77     GPRReg scratchGPR = scratch.gpr();
78     GPRReg scratch2GPR = scratch2.gpr();
79     
80     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
81     
82     JITCompiler::JumpList slowCases;
83     
84     slowCases.append(
85         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
86     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
87     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
88     
89     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
90     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
91     
92     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
93 #if USE(JSVALUE64)
94         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
95         for (unsigned i = numElements; i < vectorLength; ++i)
96             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
97 #else
98         EncodedValueDescriptor value;
99         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
100         for (unsigned i = numElements; i < vectorLength; ++i) {
101             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
102             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
103         }
104 #endif
105     }
106     
107     // I want a slow path that also loads out the storage pointer, and that's
108     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
109     // of work for a very small piece of functionality. :-/
110     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
111         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
112         structure, numElements));
113 }
114
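// Materializes the argument count of the given call frame into lengthGPR. For an
// inlined call that is not varargs the count is a compile-time constant; otherwise it
// is loaded from the ArgumentCount slot of the appropriate (machine or inline) frame.
// Passing includeThis = false subtracts one so the result counts only the real arguments.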
115 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
116 {
117     if (inlineCallFrame && !inlineCallFrame->isVarargs())
118         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
119     else {
120         VirtualRegister argumentCountRegister;
121         if (!inlineCallFrame)
122             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
123         else
124             argumentCountRegister = inlineCallFrame->argumentCountRegister;
125         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
126         if (!includeThis)
127             m_jit.sub32(TrustedImm32(1), lengthGPR);
128     }
129 }
130
131 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
132 {
133     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
134 }
135
136 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
137 {
138     if (origin.inlineCallFrame) {
139         if (origin.inlineCallFrame->isClosureCall) {
140             m_jit.loadPtr(
141                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
142                 calleeGPR);
143         } else {
144             m_jit.move(
145                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
146                 calleeGPR);
147         }
148     } else
149         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
150 }
151
152 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
153 {
154     m_jit.addPtr(
155         TrustedImm32(
156             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
157         GPRInfo::callFrameRegister, startGPR);
158 }
159
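// OSR exit fuzzing support: when enabled, every speculation check bumps a global
// counter and, once the counter hits the threshold configured via
// Options::fireOSRExitFuzzAt() / fireOSRExitFuzzAtOrAfter(), this returns an extra
// jump that forces the exit to be taken. Returns an unset Jump when fuzzing is off.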
160 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
161 {
162     if (!doOSRExitFuzzing())
163         return MacroAssembler::Jump();
164     
165     MacroAssembler::Jump result;
166     
167     m_jit.pushToSave(GPRInfo::regT0);
168     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
169     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
170     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
171     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
172     unsigned at = Options::fireOSRExitFuzzAt();
173     if (at || atOrAfter) {
174         unsigned threshold;
175         MacroAssembler::RelationalCondition condition;
176         if (atOrAfter) {
177             threshold = atOrAfter;
178             condition = MacroAssembler::Below;
179         } else {
180             threshold = at;
181             condition = MacroAssembler::NotEqual;
182         }
183         MacroAssembler::Jump ok = m_jit.branch32(
184             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
185         m_jit.popToRestore(GPRInfo::regT0);
186         result = m_jit.jump();
187         ok.link(&m_jit);
188     }
189     m_jit.popToRestore(GPRInfo::regT0);
190     
191     return result;
192 }
193
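// speculationCheck() is the workhorse for guarded speculation: it records the given
// jump(s) as the trigger for a new OSRExit, capturing the exit kind, the value source,
// a way of getting a value profile for the node, and the current size of the variable
// event stream so that bytecode state can be reconstructed if the check fails.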
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
195 {
196     if (!m_compileOkay)
197         return;
198     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
199     if (fuzzJump.isSet()) {
200         JITCompiler::JumpList jumpsToFail;
201         jumpsToFail.append(fuzzJump);
202         jumpsToFail.append(jumpToFail);
203         m_jit.appendExitInfo(jumpsToFail);
204     } else
205         m_jit.appendExitInfo(jumpToFail);
206     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
207 }
208
209 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
210 {
211     if (!m_compileOkay)
212         return;
213     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
214     if (fuzzJump.isSet()) {
215         JITCompiler::JumpList myJumpsToFail;
216         myJumpsToFail.append(jumpsToFail);
217         myJumpsToFail.append(fuzzJump);
218         m_jit.appendExitInfo(myJumpsToFail);
219     } else
220         m_jit.appendExitInfo(jumpsToFail);
221     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
222 }
223
224 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
225 {
226     if (!m_compileOkay)
227         return OSRExitJumpPlaceholder();
228     unsigned index = m_jit.jitCode()->osrExit.size();
229     m_jit.appendExitInfo();
230     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
231     return OSRExitJumpPlaceholder(index);
232 }
233
234 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
235 {
236     return speculationCheck(kind, jsValueSource, nodeUse.node());
237 }
238
239 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
240 {
241     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
245 {
246     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
247 }
248
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
250 {
251     if (!m_compileOkay)
252         return;
253     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
254     m_jit.appendExitInfo(jumpToFail);
255     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
256 }
257
258 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
259 {
260     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
261 }
262
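// An invalidation point is an OSR exit with no branch of its own: we append an empty
// jump list and remember a watchpoint label. The idea is that if this code is later
// invalidated (UncountableInvalidation), the recorded label can be patched with a jump
// to the exit.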
263 void SpeculativeJIT::emitInvalidationPoint(Node* node)
264 {
265     if (!m_compileOkay)
266         return;
267     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
268     m_jit.jitCode()->appendOSRExit(OSRExit(
269         UncountableInvalidation, JSValueSource(),
270         m_jit.graph().methodOfGettingAValueProfileFor(node),
271         this, m_stream->size()));
272     info.m_replacementSource = m_jit.watchpointLabel();
273     ASSERT(info.m_replacementSource.isSet());
274     noResult(node);
275 }
276
277 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
278 {
279     if (!m_compileOkay)
280         return;
281     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
282     m_compileOkay = false;
283     if (verboseCompilationEnabled())
284         dataLog("Bailing compilation.\n");
285 }
286
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
288 {
289     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
290 }
291
292 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
293 {
294     ASSERT(needsTypeCheck(edge, typesPassedThrough));
295     m_interpreter.filter(edge, typesPassedThrough);
296     speculationCheck(BadType, source, edge.node(), jumpToFail);
297 }
298
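// Computes the set of machine registers currently holding live values, plus the
// special registers; used, for example, so that the inline cache stub built by
// compileIn() knows which registers it must preserve.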
299 RegisterSet SpeculativeJIT::usedRegisters()
300 {
301     RegisterSet result;
302     
303     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
304         GPRReg gpr = GPRInfo::toRegister(i);
305         if (m_gprs.isInUse(gpr))
306             result.set(gpr);
307     }
308     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
309         FPRReg fpr = FPRInfo::toRegister(i);
310         if (m_fprs.isInUse(fpr))
311             result.set(fpr);
312     }
313     
314     result.merge(RegisterSet::specialRegisters());
315     
316     return result;
317 }
318
319 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
320 {
321     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
322 }
323
324 void SpeculativeJIT::runSlowPathGenerators()
325 {
326     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
327         m_slowPathGenerators[i]->generate(this);
328 }
329
330 // On Windows we need to wrap fmod; on other platforms we can call it directly.
331 // On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
332 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
333 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
334 {
335     return fmod(x, y);
336 }
337 #else
338 #define fmodAsDFGOperation fmod
339 #endif
340
341 void SpeculativeJIT::clearGenerationInfo()
342 {
343     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
344         m_generationInfo[i] = GenerationInfo();
345     m_gprs = RegisterBank<GPRInfo>();
346     m_fprs = RegisterBank<FPRInfo>();
347 }
348
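// A SilentRegisterSavePlan records how to spill a live value (if it needs spilling at
// all) and how to refill it afterwards, based on its current DataFormat; constants are
// simply rematerialized rather than stored. These plans let us save and restore
// registers around calls without touching the recorded generation info, hence "silent".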
349 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
350 {
351     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
352     Node* node = info.node();
353     DataFormat registerFormat = info.registerFormat();
354     ASSERT(registerFormat != DataFormatNone);
355     ASSERT(registerFormat != DataFormatDouble);
356         
357     SilentSpillAction spillAction;
358     SilentFillAction fillAction;
359         
360     if (!info.needsSpill())
361         spillAction = DoNothingForSpill;
362     else {
363 #if USE(JSVALUE64)
364         ASSERT(info.gpr() == source);
365         if (registerFormat == DataFormatInt32)
366             spillAction = Store32Payload;
367         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
368             spillAction = StorePtr;
369         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
370             spillAction = Store64;
371         else {
372             ASSERT(registerFormat & DataFormatJS);
373             spillAction = Store64;
374         }
375 #elif USE(JSVALUE32_64)
376         if (registerFormat & DataFormatJS) {
377             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
378             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
379         } else {
380             ASSERT(info.gpr() == source);
381             spillAction = Store32Payload;
382         }
383 #endif
384     }
385         
386     if (registerFormat == DataFormatInt32) {
387         ASSERT(info.gpr() == source);
388         ASSERT(isJSInt32(info.registerFormat()));
389         if (node->hasConstant()) {
390             ASSERT(node->isInt32Constant());
391             fillAction = SetInt32Constant;
392         } else
393             fillAction = Load32Payload;
394     } else if (registerFormat == DataFormatBoolean) {
395 #if USE(JSVALUE64)
396         RELEASE_ASSERT_NOT_REACHED();
397 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
398         fillAction = DoNothingForFill;
399 #endif
400 #elif USE(JSVALUE32_64)
401         ASSERT(info.gpr() == source);
402         if (node->hasConstant()) {
403             ASSERT(node->isBooleanConstant());
404             fillAction = SetBooleanConstant;
405         } else
406             fillAction = Load32Payload;
407 #endif
408     } else if (registerFormat == DataFormatCell) {
409         ASSERT(info.gpr() == source);
410         if (node->hasConstant()) {
411             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
412             node->asCell(); // To get the assertion.
413             fillAction = SetCellConstant;
414         } else {
415 #if USE(JSVALUE64)
416             fillAction = LoadPtr;
417 #else
418             fillAction = Load32Payload;
419 #endif
420         }
421     } else if (registerFormat == DataFormatStorage) {
422         ASSERT(info.gpr() == source);
423         fillAction = LoadPtr;
424     } else if (registerFormat == DataFormatInt52) {
425         if (node->hasConstant())
426             fillAction = SetInt52Constant;
427         else if (info.spillFormat() == DataFormatInt52)
428             fillAction = Load64;
429         else if (info.spillFormat() == DataFormatStrictInt52)
430             fillAction = Load64ShiftInt52Left;
431         else if (info.spillFormat() == DataFormatNone)
432             fillAction = Load64;
433         else {
434             RELEASE_ASSERT_NOT_REACHED();
435 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
436             fillAction = Load64; // Make GCC happy.
437 #endif
438         }
439     } else if (registerFormat == DataFormatStrictInt52) {
440         if (node->hasConstant())
441             fillAction = SetStrictInt52Constant;
442         else if (info.spillFormat() == DataFormatInt52)
443             fillAction = Load64ShiftInt52Right;
444         else if (info.spillFormat() == DataFormatStrictInt52)
445             fillAction = Load64;
446         else if (info.spillFormat() == DataFormatNone)
447             fillAction = Load64;
448         else {
449             RELEASE_ASSERT_NOT_REACHED();
450 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
451             fillAction = Load64; // Make GCC happy.
452 #endif
453         }
454     } else {
455         ASSERT(registerFormat & DataFormatJS);
456 #if USE(JSVALUE64)
457         ASSERT(info.gpr() == source);
458         if (node->hasConstant()) {
459             if (node->isCellConstant())
460                 fillAction = SetTrustedJSConstant;
461             else
462                 fillAction = SetJSConstant;
463         } else if (info.spillFormat() == DataFormatInt32) {
464             ASSERT(registerFormat == DataFormatJSInt32);
465             fillAction = Load32PayloadBoxInt;
466         } else
467             fillAction = Load64;
468 #else
469         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
470         if (node->hasConstant())
471             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
472         else if (info.payloadGPR() == source)
473             fillAction = Load32Payload;
474         else { // Fill the Tag
475             switch (info.spillFormat()) {
476             case DataFormatInt32:
477                 ASSERT(registerFormat == DataFormatJSInt32);
478                 fillAction = SetInt32Tag;
479                 break;
480             case DataFormatCell:
481                 ASSERT(registerFormat == DataFormatJSCell);
482                 fillAction = SetCellTag;
483                 break;
484             case DataFormatBoolean:
485                 ASSERT(registerFormat == DataFormatJSBoolean);
486                 fillAction = SetBooleanTag;
487                 break;
488             default:
489                 fillAction = Load32Tag;
490                 break;
491             }
492         }
493 #endif
494     }
495         
496     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
497 }
498     
499 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
500 {
501     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
502     Node* node = info.node();
503     ASSERT(info.registerFormat() == DataFormatDouble);
504
505     SilentSpillAction spillAction;
506     SilentFillAction fillAction;
507         
508     if (!info.needsSpill())
509         spillAction = DoNothingForSpill;
510     else {
511         ASSERT(!node->hasConstant());
512         ASSERT(info.spillFormat() == DataFormatNone);
513         ASSERT(info.fpr() == source);
514         spillAction = StoreDouble;
515     }
516         
517 #if USE(JSVALUE64)
518     if (node->hasConstant()) {
519         node->asNumber(); // To get the assertion.
520         fillAction = SetDoubleConstant;
521     } else {
522         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
523         fillAction = LoadDouble;
524     }
525 #elif USE(JSVALUE32_64)
526     ASSERT(info.registerFormat() == DataFormatDouble);
527     if (node->hasConstant()) {
528         node->asNumber(); // To get the assertion.
529         fillAction = SetDoubleConstant;
530     } else
531         fillAction = LoadDouble;
532 #endif
533
534     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
535 }
536     
537 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
538 {
539     switch (plan.spillAction()) {
540     case DoNothingForSpill:
541         break;
542     case Store32Tag:
543         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
544         break;
545     case Store32Payload:
546         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
547         break;
548     case StorePtr:
549         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
550         break;
551 #if USE(JSVALUE64)
552     case Store64:
553         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
554         break;
555 #endif
556     case StoreDouble:
557         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
558         break;
559     default:
560         RELEASE_ASSERT_NOT_REACHED();
561     }
562 }
563     
564 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
565 {
566 #if USE(JSVALUE32_64)
567     UNUSED_PARAM(canTrample);
568 #endif
569     switch (plan.fillAction()) {
570     case DoNothingForFill:
571         break;
572     case SetInt32Constant:
573         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
574         break;
575 #if USE(JSVALUE64)
576     case SetInt52Constant:
577         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
578         break;
579     case SetStrictInt52Constant:
580         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
581         break;
582 #endif // USE(JSVALUE64)
583     case SetBooleanConstant:
584         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
585         break;
586     case SetCellConstant:
587         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
588         break;
589 #if USE(JSVALUE64)
590     case SetTrustedJSConstant:
591         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
592         break;
593     case SetJSConstant:
594         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
595         break;
596     case SetDoubleConstant:
597         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
598         m_jit.move64ToDouble(canTrample, plan.fpr());
599         break;
600     case Load32PayloadBoxInt:
601         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
602         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
603         break;
604     case Load32PayloadConvertToInt52:
605         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
606         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
607         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
608         break;
609     case Load32PayloadSignExtend:
610         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
611         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
612         break;
613 #else
614     case SetJSConstantTag:
615         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
616         break;
617     case SetJSConstantPayload:
618         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
619         break;
620     case SetInt32Tag:
621         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
622         break;
623     case SetCellTag:
624         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
625         break;
626     case SetBooleanTag:
627         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
628         break;
629     case SetDoubleConstant:
630         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
631         break;
632 #endif
633     case Load32Tag:
634         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
635         break;
636     case Load32Payload:
637         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
638         break;
639     case LoadPtr:
640         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
641         break;
642 #if USE(JSVALUE64)
643     case Load64:
644         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
645         break;
646     case Load64ShiftInt52Right:
647         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
648         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
649         break;
650     case Load64ShiftInt52Left:
651         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
652         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
653         break;
654 #endif
655     case LoadDouble:
656         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
657         break;
658     default:
659         RELEASE_ASSERT_NOT_REACHED();
660     }
661 }
662     
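// Given the indexing type byte of a cell in tempGPR, emits branches that are taken when
// the cell does not match the expected array class and indexing shape. Note that
// tempGPR is clobbered in the process.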
663 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
664 {
665     switch (arrayMode.arrayClass()) {
666     case Array::OriginalArray: {
667         CRASH();
668 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
669         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
670         return result;
671 #endif
672     }
673         
674     case Array::Array:
675         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
676         return m_jit.branch32(
677             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
678         
679     case Array::NonArray:
680     case Array::OriginalNonArray:
681         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
682         return m_jit.branch32(
683             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
684         
685     case Array::PossiblyArray:
686         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
687         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
688     }
689     
690     RELEASE_ASSERT_NOT_REACHED();
691     return JITCompiler::Jump();
692 }
693
694 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
695 {
696     JITCompiler::JumpList result;
697     
698     switch (arrayMode.type()) {
699     case Array::Int32:
700         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
701
702     case Array::Double:
703         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
704
705     case Array::Contiguous:
706         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
707
708     case Array::Undecided:
709         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
710
711     case Array::ArrayStorage:
712     case Array::SlowPutArrayStorage: {
713         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
714         
715         if (arrayMode.isJSArray()) {
716             if (arrayMode.isSlowPut()) {
717                 result.append(
718                     m_jit.branchTest32(
719                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
720                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
721                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
722                 result.append(
723                     m_jit.branch32(
724                         MacroAssembler::Above, tempGPR,
725                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
726                 break;
727             }
728             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
729             result.append(
730                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
731             break;
732         }
733         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
734         if (arrayMode.isSlowPut()) {
735             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
736             result.append(
737                 m_jit.branch32(
738                     MacroAssembler::Above, tempGPR,
739                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
740             break;
741         }
742         result.append(
743             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
744         break;
745     }
746     default:
747         CRASH();
748         break;
749     }
750     
751     return result;
752 }
753
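// CheckArray: speculate that the base cell already has the indexing type (or, for typed
// arrays and the arguments objects, the cell type) that the node's ArrayMode expects.
// This never converts the object; conversion is arrayify()'s job, and the mode is
// asserted not to do conversion here.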
754 void SpeculativeJIT::checkArray(Node* node)
755 {
756     ASSERT(node->arrayMode().isSpecific());
757     ASSERT(!node->arrayMode().doesConversion());
758     
759     SpeculateCellOperand base(this, node->child1());
760     GPRReg baseReg = base.gpr();
761     
762     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
763         noResult(m_currentNode);
764         return;
765     }
766     
767     const ClassInfo* expectedClassInfo = 0;
768     
769     switch (node->arrayMode().type()) {
770     case Array::String:
771         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
772         break;
773     case Array::Int32:
774     case Array::Double:
775     case Array::Contiguous:
776     case Array::Undecided:
777     case Array::ArrayStorage:
778     case Array::SlowPutArrayStorage: {
779         GPRTemporary temp(this);
780         GPRReg tempGPR = temp.gpr();
781         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
782         speculationCheck(
783             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
784             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
785         
786         noResult(m_currentNode);
787         return;
788     }
789     case Array::DirectArguments:
790         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
791         noResult(m_currentNode);
792         return;
793     case Array::ScopedArguments:
794         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
795         noResult(m_currentNode);
796         return;
797     default:
798         speculateCellTypeWithoutTypeFiltering(
799             node->child1(), baseReg,
800             typeForTypedArrayType(node->arrayMode().typedArrayType()));
801         noResult(m_currentNode);
802         return;
803     }
804     
805     RELEASE_ASSERT(expectedClassInfo);
806     
807     GPRTemporary temp(this);
808     GPRTemporary temp2(this);
809     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
810     speculationCheck(
811         BadType, JSValueSource::unboxedCell(baseReg), node,
812         m_jit.branchPtr(
813             MacroAssembler::NotEqual,
814             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
815             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
816     
817     noResult(m_currentNode);
818 }
819
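// Arrayify: ensure the base object has the storage the ArrayMode wants, converting it
// if necessary. The fast path only checks the structure (for ArrayifyToStructure) or
// the indexing type; anything else branches to an ArrayifySlowPathGenerator, which
// performs the actual conversion.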
820 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
821 {
822     ASSERT(node->arrayMode().doesConversion());
823     
824     GPRTemporary temp(this);
825     GPRTemporary structure;
826     GPRReg tempGPR = temp.gpr();
827     GPRReg structureGPR = InvalidGPRReg;
828     
829     if (node->op() != ArrayifyToStructure) {
830         GPRTemporary realStructure(this);
831         structure.adopt(realStructure);
832         structureGPR = structure.gpr();
833     }
834         
835     // We can skip all that comes next if we already have array storage.
836     MacroAssembler::JumpList slowPath;
837     
838     if (node->op() == ArrayifyToStructure) {
839         slowPath.append(m_jit.branchWeakStructure(
840             JITCompiler::NotEqual,
841             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
842             node->structure()));
843     } else {
844         m_jit.load8(
845             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
846         
847         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
848     }
849     
850     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
851         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
852     
853     noResult(m_currentNode);
854 }
855
856 void SpeculativeJIT::arrayify(Node* node)
857 {
858     ASSERT(node->arrayMode().isSpecific());
859     
860     SpeculateCellOperand base(this, node->child1());
861     
862     if (!node->child2()) {
863         arrayify(node, base.gpr(), InvalidGPRReg);
864         return;
865     }
866     
867     SpeculateInt32Operand property(this, node->child2());
868     
869     arrayify(node, base.gpr(), property.gpr());
870 }
871
872 GPRReg SpeculativeJIT::fillStorage(Edge edge)
873 {
874     VirtualRegister virtualRegister = edge->virtualRegister();
875     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
876     
877     switch (info.registerFormat()) {
878     case DataFormatNone: {
879         if (info.spillFormat() == DataFormatStorage) {
880             GPRReg gpr = allocate();
881             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
882             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
883             info.fillStorage(*m_stream, gpr);
884             return gpr;
885         }
886         
887         // Must be a cell; fill it as a cell and then return the pointer.
888         return fillSpeculateCell(edge);
889     }
890         
891     case DataFormatStorage: {
892         GPRReg gpr = info.gpr();
893         m_gprs.lock(gpr);
894         return gpr;
895     }
896         
897     default:
898         return fillSpeculateCell(edge);
899     }
900 }
901
902 void SpeculativeJIT::useChildren(Node* node)
903 {
904     if (node->flags() & NodeHasVarArgs) {
905         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
906             if (!!m_jit.graph().m_varArgChildren[childIdx])
907                 use(m_jit.graph().m_varArgChildren[childIdx]);
908         }
909     } else {
910         Edge child1 = node->child1();
911         if (!child1) {
912             ASSERT(!node->child2() && !node->child3());
913             return;
914         }
915         use(child1);
916         
917         Edge child2 = node->child2();
918         if (!child2) {
919             ASSERT(!node->child3());
920             return;
921         }
922         use(child2);
923         
924         Edge child3 = node->child3();
925         if (!child3)
926             return;
927         use(child3);
928     }
929 }
930
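// Compiles the 'in' operator, e.g. for source like: if ("length" in base) { ... }.
// When the property is a constant atomic string we plant a patchable jump and a
// StructureStubInfo so the check can be inline-cached (see operationInOptimize);
// otherwise we flush registers and call the generic operationGenericIn slow path.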
931 void SpeculativeJIT::compileIn(Node* node)
932 {
933     SpeculateCellOperand base(this, node->child2());
934     GPRReg baseGPR = base.gpr();
935     
936     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
937         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
938             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
939             
940             GPRTemporary result(this);
941             GPRReg resultGPR = result.gpr();
942
943             use(node->child1());
944             
945             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
946             MacroAssembler::Label done = m_jit.label();
947             
948             // Since this block is executed only when string->tryGetValueImpl() returns an atomic
949             // StringImpl, we can safely cast it to const AtomicStringImpl*.
950             auto slowPath = slowPathCall(
951                 jump.m_jump, this, operationInOptimize,
952                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
953                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
954             
955             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
956             stubInfo->codeOrigin = node->origin.semantic;
957             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
958             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
959             stubInfo->patch.usedRegisters = usedRegisters();
960             stubInfo->patch.spillMode = NeedToSpill;
961
962             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
963             addSlowPathGenerator(WTF::move(slowPath));
964
965             base.use();
966
967             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
968             return;
969         }
970     }
971
972     JSValueOperand key(this, node->child1());
973     JSValueRegs regs = key.jsValueRegs();
974         
975     GPRFlushedCallResult result(this);
976     GPRReg resultGPR = result.gpr();
977         
978     base.use();
979     key.use();
980         
981     flushRegisters();
982     callOperation(
983         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
984         baseGPR, regs);
985     m_jit.exceptionCheck();
986     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
987 }
988
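// Non-speculative compare. If the compare's only user is the Branch node that
// immediately follows it (detectPeepHoleBranch), the two are fused into a single
// compare-and-branch and we skip ahead past the branch; otherwise the compare produces
// a boolean result in the normal way. Returns true when fusion happened.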
989 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
990 {
991     unsigned branchIndexInBlock = detectPeepHoleBranch();
992     if (branchIndexInBlock != UINT_MAX) {
993         Node* branchNode = m_block->at(branchIndexInBlock);
994
995         ASSERT(node->adjustedRefCount() == 1);
996         
997         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
998     
999         m_indexInBlock = branchIndexInBlock;
1000         m_currentNode = branchNode;
1001         
1002         return true;
1003     }
1004     
1005     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1006     
1007     return false;
1008 }
1009
1010 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1011 {
1012     unsigned branchIndexInBlock = detectPeepHoleBranch();
1013     if (branchIndexInBlock != UINT_MAX) {
1014         Node* branchNode = m_block->at(branchIndexInBlock);
1015
1016         ASSERT(node->adjustedRefCount() == 1);
1017         
1018         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1019     
1020         m_indexInBlock = branchIndexInBlock;
1021         m_currentNode = branchNode;
1022         
1023         return true;
1024     }
1025     
1026     nonSpeculativeNonPeepholeStrictEq(node, invert);
1027     
1028     return false;
1029 }
1030
1031 static const char* dataFormatString(DataFormat format)
1032 {
1033     // These values correspond to the DataFormat enum.
1034     const char* strings[] = {
1035         "[  ]",
1036         "[ i]",
1037         "[ d]",
1038         "[ c]",
1039         "Err!",
1040         "Err!",
1041         "Err!",
1042         "Err!",
1043         "[J ]",
1044         "[Ji]",
1045         "[Jd]",
1046         "[Jc]",
1047         "Err!",
1048         "Err!",
1049         "Err!",
1050         "Err!",
1051     };
1052     return strings[format];
1053 }
1054
1055 void SpeculativeJIT::dump(const char* label)
1056 {
1057     if (label)
1058         dataLogF("<%s>\n", label);
1059
1060     dataLogF("  gprs:\n");
1061     m_gprs.dump();
1062     dataLogF("  fprs:\n");
1063     m_fprs.dump();
1064     dataLogF("  VirtualRegisters:\n");
1065     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1066         GenerationInfo& info = m_generationInfo[i];
1067         if (info.alive())
1068             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1069         else
1070             dataLogF("    % 3d:[__][__]", i);
1071         if (info.registerFormat() == DataFormatDouble)
1072             dataLogF(":fpr%d\n", info.fpr());
1073         else if (info.registerFormat() != DataFormatNone
1074 #if USE(JSVALUE32_64)
1075             && !(info.registerFormat() & DataFormatJS)
1076 #endif
1077             ) {
1078             ASSERT(info.gpr() != InvalidGPRReg);
1079             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1080         } else
1081             dataLogF("\n");
1082     }
1083     if (label)
1084         dataLogF("</%s>\n", label);
1085 }
1086
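// GPRTemporary / FPRTemporary / JSValueRegsTemporary are small RAII helpers that grab a
// register from the speculative JIT's register bank for the duration of compiling one
// node. The Reuse-style constructors hand back an operand's register when canReuse()
// says it may be taken over, avoiding an extra allocation and move.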
1087 GPRTemporary::GPRTemporary()
1088     : m_jit(0)
1089     , m_gpr(InvalidGPRReg)
1090 {
1091 }
1092
1093 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1094     : m_jit(jit)
1095     , m_gpr(InvalidGPRReg)
1096 {
1097     m_gpr = m_jit->allocate();
1098 }
1099
1100 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1101     : m_jit(jit)
1102     , m_gpr(InvalidGPRReg)
1103 {
1104     m_gpr = m_jit->allocate(specific);
1105 }
1106
1107 #if USE(JSVALUE32_64)
1108 GPRTemporary::GPRTemporary(
1109     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1110     : m_jit(jit)
1111     , m_gpr(InvalidGPRReg)
1112 {
1113     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1114         m_gpr = m_jit->reuse(op1.gpr(which));
1115     else
1116         m_gpr = m_jit->allocate();
1117 }
1118 #endif // USE(JSVALUE32_64)
1119
1120 JSValueRegsTemporary::JSValueRegsTemporary() { }
1121
1122 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1123 #if USE(JSVALUE64)
1124     : m_gpr(jit)
1125 #else
1126     : m_payloadGPR(jit)
1127     , m_tagGPR(jit)
1128 #endif
1129 {
1130 }
1131
1132 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1133
1134 JSValueRegs JSValueRegsTemporary::regs()
1135 {
1136 #if USE(JSVALUE64)
1137     return JSValueRegs(m_gpr.gpr());
1138 #else
1139     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1140 #endif
1141 }
1142
1143 void GPRTemporary::adopt(GPRTemporary& other)
1144 {
1145     ASSERT(!m_jit);
1146     ASSERT(m_gpr == InvalidGPRReg);
1147     ASSERT(other.m_jit);
1148     ASSERT(other.m_gpr != InvalidGPRReg);
1149     m_jit = other.m_jit;
1150     m_gpr = other.m_gpr;
1151     other.m_jit = 0;
1152     other.m_gpr = InvalidGPRReg;
1153 }
1154
1155 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1156     : m_jit(jit)
1157     , m_fpr(InvalidFPRReg)
1158 {
1159     m_fpr = m_jit->fprAllocate();
1160 }
1161
1162 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1163     : m_jit(jit)
1164     , m_fpr(InvalidFPRReg)
1165 {
1166     if (m_jit->canReuse(op1.node()))
1167         m_fpr = m_jit->reuse(op1.fpr());
1168     else
1169         m_fpr = m_jit->fprAllocate();
1170 }
1171
1172 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1173     : m_jit(jit)
1174     , m_fpr(InvalidFPRReg)
1175 {
1176     if (m_jit->canReuse(op1.node()))
1177         m_fpr = m_jit->reuse(op1.fpr());
1178     else if (m_jit->canReuse(op2.node()))
1179         m_fpr = m_jit->reuse(op2.fpr());
1180     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1181         m_fpr = m_jit->reuse(op1.fpr());
1182     else
1183         m_fpr = m_jit->fprAllocate();
1184 }
1185
1186 #if USE(JSVALUE32_64)
1187 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1188     : m_jit(jit)
1189     , m_fpr(InvalidFPRReg)
1190 {
1191     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1192         m_fpr = m_jit->reuse(op1.fpr());
1193     else
1194         m_fpr = m_jit->fprAllocate();
1195 }
1196 #endif
1197
1198 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1199 {
1200     BasicBlock* taken = branchNode->branchData()->taken.block;
1201     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1202     
1203     SpeculateDoubleOperand op1(this, node->child1());
1204     SpeculateDoubleOperand op2(this, node->child2());
1205     
1206     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1207     jump(notTaken);
1208 }
1209
1210 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1211 {
1212     BasicBlock* taken = branchNode->branchData()->taken.block;
1213     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1214
1215     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1216     
1217     if (taken == nextBlock()) {
1218         condition = MacroAssembler::NotEqual;
1219         BasicBlock* tmp = taken;
1220         taken = notTaken;
1221         notTaken = tmp;
1222     }
1223
1224     SpeculateCellOperand op1(this, node->child1());
1225     SpeculateCellOperand op2(this, node->child2());
1226     
1227     GPRReg op1GPR = op1.gpr();
1228     GPRReg op2GPR = op2.gpr();
1229     
1230     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1231         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1232             speculationCheck(
1233                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1234         }
1235         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1236             speculationCheck(
1237                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1238         }
1239     } else {
1240         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1241             speculationCheck(
1242                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1243                 m_jit.branchIfNotObject(op1GPR));
1244         }
1245         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1246             m_jit.branchTest8(
1247                 MacroAssembler::NonZero, 
1248                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1249                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1250
1251         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1252             speculationCheck(
1253                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1254                 m_jit.branchIfNotObject(op2GPR));
1255         }
1256         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1257             m_jit.branchTest8(
1258                 MacroAssembler::NonZero, 
1259                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1260                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1261     }
1262
1263     branchPtr(condition, op1GPR, op2GPR, taken);
1264     jump(notTaken);
1265 }
1266
1267 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1268 {
1269     BasicBlock* taken = branchNode->branchData()->taken.block;
1270     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1271
1272     // The branch instruction will branch to the taken block.
1273     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1274     if (taken == nextBlock()) {
1275         condition = JITCompiler::invert(condition);
1276         BasicBlock* tmp = taken;
1277         taken = notTaken;
1278         notTaken = tmp;
1279     }
1280
1281     if (node->child1()->isBooleanConstant()) {
1282         bool imm = node->child1()->asBoolean();
1283         SpeculateBooleanOperand op2(this, node->child2());
1284         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1285     } else if (node->child2()->isBooleanConstant()) {
1286         SpeculateBooleanOperand op1(this, node->child1());
1287         bool imm = node->child2()->asBoolean();
1288         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1289     } else {
1290         SpeculateBooleanOperand op1(this, node->child1());
1291         SpeculateBooleanOperand op2(this, node->child2());
1292         branch32(condition, op1.gpr(), op2.gpr(), taken);
1293     }
1294
1295     jump(notTaken);
1296 }
1297
1298 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1299 {
1300     BasicBlock* taken = branchNode->branchData()->taken.block;
1301     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1302
1303     // The branch instruction will branch to the taken block.
1304     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1305     if (taken == nextBlock()) {
1306         condition = JITCompiler::invert(condition);
1307         BasicBlock* tmp = taken;
1308         taken = notTaken;
1309         notTaken = tmp;
1310     }
1311
1312     if (node->child1()->isInt32Constant()) {
1313         int32_t imm = node->child1()->asInt32();
1314         SpeculateInt32Operand op2(this, node->child2());
1315         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1316     } else if (node->child2()->isInt32Constant()) {
1317         SpeculateInt32Operand op1(this, node->child1());
1318         int32_t imm = node->child2()->asInt32();
1319         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1320     } else {
1321         SpeculateInt32Operand op1(this, node->child1());
1322         SpeculateInt32Operand op2(this, node->child2());
1323         branch32(condition, op1.gpr(), op2.gpr(), taken);
1324     }
1325
1326     jump(notTaken);
1327 }
1328
1329 // Returns true if the compare is fused with a subsequent branch.
1330 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1331 {
1332     // Fused compare & branch.
1333     unsigned branchIndexInBlock = detectPeepHoleBranch();
1334     if (branchIndexInBlock != UINT_MAX) {
1335         Node* branchNode = m_block->at(branchIndexInBlock);
1336
1337         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1338         // so there can be no intervening nodes that also reference the compare.
1339         ASSERT(node->adjustedRefCount() == 1);
1340
1341         if (node->isBinaryUseKind(Int32Use))
1342             compilePeepHoleInt32Branch(node, branchNode, condition);
1343 #if USE(JSVALUE64)
1344         else if (node->isBinaryUseKind(Int52RepUse))
1345             compilePeepHoleInt52Branch(node, branchNode, condition);
1346 #endif // USE(JSVALUE64)
1347         else if (node->isBinaryUseKind(DoubleRepUse))
1348             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1349         else if (node->op() == CompareEq) {
1350             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1351                 // Use non-peephole comparison, for now.
1352                 return false;
1353             }
1354             if (node->isBinaryUseKind(BooleanUse))
1355                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1356             else if (node->isBinaryUseKind(ObjectUse))
1357                 compilePeepHoleObjectEquality(node, branchNode);
1358             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1359                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1360             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1361                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1362             else if (!needsTypeCheck(node->child1(), SpecOther))
1363                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1364             else if (!needsTypeCheck(node->child2(), SpecOther))
1365                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1366             else {
1367                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1368                 return true;
1369             }
1370         } else {
1371             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1372             return true;
1373         }
1374
1375         use(node->child1());
1376         use(node->child2());
1377         m_indexInBlock = branchIndexInBlock;
1378         m_currentNode = branchNode;
1379         return true;
1380     }
1381     return false;
1382 }
1383
1384 void SpeculativeJIT::noticeOSRBirth(Node* node)
1385 {
1386     if (!node->hasVirtualRegister())
1387         return;
1388     
1389     VirtualRegister virtualRegister = node->virtualRegister();
1390     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1391     
1392     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1393 }
1394
1395 void SpeculativeJIT::compileMovHint(Node* node)
1396 {
1397     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1398     
1399     Node* child = node->child1().node();
1400     noticeOSRBirth(child);
1401     
1402     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1403 }
1404
1405 void SpeculativeJIT::bail(AbortReason reason)
1406 {
1407     if (verboseCompilationEnabled())
1408         dataLog("Bailing compilation.\n");
1409     m_compileOkay = true;
1410     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1411     clearGenerationInfo();
1412 }
1413
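// Generates code for m_block. We record the block head label, plant a breakpoint for
// blocks the CFA proved unreachable, replay variablesAtHead into the variable event
// stream (so OSR exit knows where locals live at the block head), and then compile each
// node in turn, running the abstract interpreter between nodes and bailing if the
// abstract state ever becomes contradictory.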
1414 void SpeculativeJIT::compileCurrentBlock()
1415 {
1416     ASSERT(m_compileOkay);
1417     
1418     if (!m_block)
1419         return;
1420     
1421     ASSERT(m_block->isReachable);
1422     
1423     m_jit.blockHeads()[m_block->index] = m_jit.label();
1424
1425     if (!m_block->intersectionOfCFAHasVisited) {
1426         // Don't generate code for basic blocks that are unreachable according to CFA.
1427         // But to be sure that nobody has generated a jump to this block, drop in a
1428         // breakpoint here.
1429         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1430         return;
1431     }
1432
1433     m_stream->appendAndLog(VariableEvent::reset());
1434     
1435     m_jit.jitAssertHasValidCallFrame();
1436     m_jit.jitAssertTagsInPlace();
1437     m_jit.jitAssertArgumentCountSane();
1438
1439     m_state.reset();
1440     m_state.beginBasicBlock(m_block);
1441     
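    // Record, for every variable live at the head of this block, where (and in
    // what format) its value lives. The variable event stream consumes these
    // events to reconstruct locals at OSR exit.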
1442     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1443         int operand = m_block->variablesAtHead.operandForIndex(i);
1444         Node* node = m_block->variablesAtHead[i];
1445         if (!node)
1446             continue; // No need to record dead SetLocals.
1447         
1448         VariableAccessData* variable = node->variableAccessData();
1449         DataFormat format;
1450         if (!node->refCount())
1451             continue; // No need to record dead SetLocals.
1452         format = dataFormatFor(variable->flushFormat());
1453         m_stream->appendAndLog(
1454             VariableEvent::setLocal(
1455                 VirtualRegister(operand),
1456                 variable->machineLocal(),
1457                 format));
1458     }
1459
1460     m_origin = NodeOrigin();
1461     
1462     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1463         m_currentNode = m_block->at(m_indexInBlock);
1464         
1465         // We may have hit a contradiction that the CFA was aware of but that the JIT
1466         // didn't cause directly.
1467         if (!m_state.isValid()) {
1468             bail(DFGBailedAtTopOfBlock);
1469             return;
1470         }
1471
1472         m_interpreter.startExecuting();
1473         m_jit.setForNode(m_currentNode);
1474         m_origin = m_currentNode->origin;
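        // When validation is enabled, tighten exitOK using the static mayExit
        // analysis: if the analysis says this node should not exit, any OSR
        // exit we accidentally emit for it should be flagged.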
1475         if (validationEnabled())
1476             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1477         m_lastGeneratedNode = m_currentNode->op();
1478         
1479         ASSERT(m_currentNode->shouldGenerate());
1480         
1481         if (verboseCompilationEnabled()) {
1482             dataLogF(
1483                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1484                 (int)m_currentNode->index(),
1485                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1486             dataLog("\n");
1487         }
1488         
1489         m_jit.jitAssertNoException();
1490
1491         compile(m_currentNode);
1492         
1493         if (belongsInMinifiedGraph(m_currentNode->op()))
1494             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1495         
1496 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1497         m_jit.clearRegisterAllocationOffsets();
1498 #endif
1499         
1500         if (!m_compileOkay) {
1501             bail(DFGBailedAtEndOfNode);
1502             return;
1503         }
1504         
1505         // Make sure that the abstract state is rematerialized for the next node.
1506         m_interpreter.executeEffects(m_indexInBlock);
1507     }
1508     
1509     // Perform the most basic verification that children have been used correctly.
1510     if (!ASSERT_DISABLED) {
1511         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1512             GenerationInfo& info = m_generationInfo[index];
1513             RELEASE_ASSERT(!info.alive());
1514         }
1515     }
1516 }
1517
1518 // If we are making type predictions about our arguments, then
1519 // we need to check that they are correct on function entry.
1520 void SpeculativeJIT::checkArgumentTypes()
1521 {
1522     ASSERT(!m_currentNode);
1523     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1524
1525     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1526         Node* node = m_jit.graph().m_arguments[i];
1527         if (!node) {
1528             // The argument is dead. We don't do any checks for such arguments.
1529             continue;
1530         }
1531         
1532         ASSERT(node->op() == SetArgument);
1533         ASSERT(node->shouldGenerate());
1534
1535         VariableAccessData* variableAccessData = node->variableAccessData();
1536         FlushFormat format = variableAccessData->flushFormat();
1537         
1538         if (format == FlushedJSValue)
1539             continue;
1540         
1541         VirtualRegister virtualRegister = variableAccessData->local();
1542
1543         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1544         
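        // The checks below rely on the JSValue encodings (see JSCJSValue.h).
        // On 64-bit: int32s have the TagTypeNumber bits set in their top 16 bits,
        // cells have both the top 16 bits and the low tag bits clear, and booleans
        // are the immediates ValueFalse/ValueTrue, which differ only in bit 0.
        // On 32-bit, the tag word is compared against the expected tag directly.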
1545 #if USE(JSVALUE64)
1546         switch (format) {
1547         case FlushedInt32: {
1548             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1549             break;
1550         }
1551         case FlushedBoolean: {
1552             GPRTemporary temp(this);
1553             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1554             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1555             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1556             break;
1557         }
1558         case FlushedCell: {
1559             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1560             break;
1561         }
1562         default:
1563             RELEASE_ASSERT_NOT_REACHED();
1564             break;
1565         }
1566 #else
1567         switch (format) {
1568         case FlushedInt32: {
1569             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1570             break;
1571         }
1572         case FlushedBoolean: {
1573             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1574             break;
1575         }
1576         case FlushedCell: {
1577             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1578             break;
1579         }
1580         default:
1581             RELEASE_ASSERT_NOT_REACHED();
1582             break;
1583         }
1584 #endif
1585     }
1586
1587     m_origin = NodeOrigin();
1588 }
1589
1590 bool SpeculativeJIT::compile()
1591 {
1592     checkArgumentTypes();
1593     
1594     ASSERT(!m_currentNode);
1595     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1596         m_jit.setForBlockIndex(blockIndex);
1597         m_block = m_jit.graph().block(blockIndex);
1598         compileCurrentBlock();
1599     }
1600     linkBranches();
1601     return true;
1602 }
1603
1604 void SpeculativeJIT::createOSREntries()
1605 {
1606     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1607         BasicBlock* block = m_jit.graph().block(blockIndex);
1608         if (!block)
1609             continue;
1610         if (!block->isOSRTarget)
1611             continue;
1612         
1613         // Currently we don't have OSR entry trampolines. We could add them
1614         // here if need be.
1615         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1616     }
1617 }
1618
1619 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1620 {
1621     unsigned osrEntryIndex = 0;
1622     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1623         BasicBlock* block = m_jit.graph().block(blockIndex);
1624         if (!block)
1625             continue;
1626         if (!block->isOSRTarget)
1627             continue;
1628         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1629     }
1630     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1631     
1632     if (verboseCompilationEnabled()) {
1633         DumpContext dumpContext;
1634         dataLog("OSR Entries:\n");
1635         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1636             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1637         if (!dumpContext.isEmpty())
1638             dumpContext.dump(WTF::dataFile());
1639     }
1640 }
1641
1642 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1643 {
1644     Edge child3 = m_jit.graph().varArgChild(node, 2);
1645     Edge child4 = m_jit.graph().varArgChild(node, 3);
1646
1647     ArrayMode arrayMode = node->arrayMode();
1648     
1649     GPRReg baseReg = base.gpr();
1650     GPRReg propertyReg = property.gpr();
1651     
1652     SpeculateDoubleOperand value(this, child3);
1653
1654     FPRReg valueReg = value.fpr();
1655     
1656     DFG_TYPE_CHECK(
1657         JSValueRegs(), child3, SpecFullRealNumber,
1658         m_jit.branchDouble(
1659             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1660     
1661     if (!m_compileOkay)
1662         return;
1663     
1664     StorageOperand storage(this, child4);
1665     GPRReg storageReg = storage.gpr();
1666
1667     if (node->op() == PutByValAlias) {
1668         // Store the value to the array.
1669         GPRReg propertyReg = property.gpr();
1670         FPRReg valueReg = value.fpr();
1671         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1672         
1673         noResult(m_currentNode);
1674         return;
1675     }
1676     
1677     GPRTemporary temporary;
1678     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1679
1680     MacroAssembler::Jump slowCase;
1681     
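    // In-bounds array modes just speculate that the index is within the public
    // length. Otherwise we allow stores up to the vector length (bumping the
    // public length as needed) and leave anything beyond that to the slow path
    // call registered below.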
1682     if (arrayMode.isInBounds()) {
1683         speculationCheck(
1684             OutOfBounds, JSValueRegs(), 0,
1685             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1686     } else {
1687         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1688         
1689         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1690         
1691         if (!arrayMode.isOutOfBounds())
1692             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1693         
1694         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1695         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1696         
1697         inBounds.link(&m_jit);
1698     }
1699     
1700     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1701
1702     base.use();
1703     property.use();
1704     value.use();
1705     storage.use();
1706     
1707     if (arrayMode.isOutOfBounds()) {
1708         addSlowPathGenerator(
1709             slowPathCall(
1710                 slowCase, this,
1711                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1712                 NoResult, baseReg, propertyReg, valueReg));
1713     }
1714
1715     noResult(m_currentNode, UseChildrenCalledExplicitly);
1716 }
1717
1718 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1719 {
1720     SpeculateCellOperand string(this, node->child1());
1721     SpeculateStrictInt32Operand index(this, node->child2());
1722     StorageOperand storage(this, node->child3());
1723
1724     GPRReg stringReg = string.gpr();
1725     GPRReg indexReg = index.gpr();
1726     GPRReg storageReg = storage.gpr();
1727     
1728     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1729
1730     // unsigned comparison so we can filter out negative indices and indices that are too large
1731     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1732
1733     GPRTemporary scratch(this);
1734     GPRReg scratchReg = scratch.gpr();
1735
1736     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1737
1738     // Load the character into scratchReg
1739     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1740
1741     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1742     JITCompiler::Jump cont8Bit = m_jit.jump();
1743
1744     is16Bit.link(&m_jit);
1745
1746     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1747
1748     cont8Bit.link(&m_jit);
1749
1750     int32Result(scratchReg, m_currentNode);
1751 }
1752
1753 void SpeculativeJIT::compileGetByValOnString(Node* node)
1754 {
1755     SpeculateCellOperand base(this, node->child1());
1756     SpeculateStrictInt32Operand property(this, node->child2());
1757     StorageOperand storage(this, node->child3());
1758     GPRReg baseReg = base.gpr();
1759     GPRReg propertyReg = property.gpr();
1760     GPRReg storageReg = storage.gpr();
1761
1762     GPRTemporary scratch(this);
1763     GPRReg scratchReg = scratch.gpr();
1764 #if USE(JSVALUE32_64)
1765     GPRTemporary resultTag;
1766     GPRReg resultTagReg = InvalidGPRReg;
1767     if (node->arrayMode().isOutOfBounds()) {
1768         GPRTemporary realResultTag(this);
1769         resultTag.adopt(realResultTag);
1770         resultTagReg = resultTag.gpr();
1771     }
1772 #endif
1773
1774     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1775
1776     // unsigned comparison so we can filter out negative indices and indices that are too large
1777     JITCompiler::Jump outOfBounds = m_jit.branch32(
1778         MacroAssembler::AboveOrEqual, propertyReg,
1779         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1780     if (node->arrayMode().isInBounds())
1781         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1782
1783     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1784
1785     // Load the character into scratchReg
1786     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1787
1788     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1789     JITCompiler::Jump cont8Bit = m_jit.jump();
1790
1791     is16Bit.link(&m_jit);
1792
1793     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1794
1795     JITCompiler::Jump bigCharacter =
1796         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1797
1798         // 8-bit string values don't need the isASCII check.
1799     cont8Bit.link(&m_jit);
1800
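    // The character code is now an index into the VM's table of single-character
    // strings; scale it by the pointer size and load the cached JSString*.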
1801     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1802     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1803     m_jit.loadPtr(scratchReg, scratchReg);
1804
1805     addSlowPathGenerator(
1806         slowPathCall(
1807             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1808
1809     if (node->arrayMode().isOutOfBounds()) {
1810 #if USE(JSVALUE32_64)
1811         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1812 #endif
1813
1814         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1815         if (globalObject->stringPrototypeChainIsSane()) {
1816             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1817             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1818             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1819             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1820             // indexed properties either.
1821             // https://bugs.webkit.org/show_bug.cgi?id=144668
1822             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1823             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1824             
1825 #if USE(JSVALUE64)
1826             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1827                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1828 #else
1829             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1830                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1831                 baseReg, propertyReg));
1832 #endif
1833         } else {
1834 #if USE(JSVALUE64)
1835             addSlowPathGenerator(
1836                 slowPathCall(
1837                     outOfBounds, this, operationGetByValStringInt,
1838                     scratchReg, baseReg, propertyReg));
1839 #else
1840             addSlowPathGenerator(
1841                 slowPathCall(
1842                     outOfBounds, this, operationGetByValStringInt,
1843                     resultTagReg, scratchReg, baseReg, propertyReg));
1844 #endif
1845         }
1846         
1847 #if USE(JSVALUE64)
1848         jsValueResult(scratchReg, m_currentNode);
1849 #else
1850         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1851 #endif
1852     } else
1853         cellResult(scratchReg, m_currentNode);
1854 }
1855
1856 void SpeculativeJIT::compileFromCharCode(Node* node)
1857 {
1858     SpeculateStrictInt32Operand property(this, node->child1());
1859     GPRReg propertyReg = property.gpr();
1860     GPRTemporary smallStrings(this);
1861     GPRTemporary scratch(this);
1862     GPRReg scratchReg = scratch.gpr();
1863     GPRReg smallStringsReg = smallStrings.gpr();
1864
1865     JITCompiler::JumpList slowCases;
1866     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1867     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1868     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1869
1870     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1871     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1872     cellResult(scratchReg, m_currentNode);
1873 }
1874
1875 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1876 {
1877     VirtualRegister virtualRegister = node->virtualRegister();
1878     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1879
1880     switch (info.registerFormat()) {
1881     case DataFormatStorage:
1882         RELEASE_ASSERT_NOT_REACHED();
1883
1884     case DataFormatBoolean:
1885     case DataFormatCell:
1886         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1887         return GeneratedOperandTypeUnknown;
1888
1889     case DataFormatNone:
1890     case DataFormatJSCell:
1891     case DataFormatJS:
1892     case DataFormatJSBoolean:
1893     case DataFormatJSDouble:
1894         return GeneratedOperandJSValue;
1895
1896     case DataFormatJSInt32:
1897     case DataFormatInt32:
1898         return GeneratedOperandInteger;
1899
1900     default:
1901         RELEASE_ASSERT_NOT_REACHED();
1902         return GeneratedOperandTypeUnknown;
1903     }
1904 }
1905
1906 void SpeculativeJIT::compileValueToInt32(Node* node)
1907 {
1908     switch (node->child1().useKind()) {
1909 #if USE(JSVALUE64)
1910     case Int52RepUse: {
1911         SpeculateStrictInt52Operand op1(this, node->child1());
1912         GPRTemporary result(this, Reuse, op1);
1913         GPRReg op1GPR = op1.gpr();
1914         GPRReg resultGPR = result.gpr();
1915         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1916         int32Result(resultGPR, node, DataFormatInt32);
1917         return;
1918     }
1919 #endif // USE(JSVALUE64)
1920         
1921     case DoubleRepUse: {
1922         GPRTemporary result(this);
1923         SpeculateDoubleOperand op1(this, node->child1());
1924         FPRReg fpr = op1.fpr();
1925         GPRReg gpr = result.gpr();
1926         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1927         
1928         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1929         
1930         int32Result(gpr, node);
1931         return;
1932     }
1933     
1934     case NumberUse:
1935     case NotCellUse: {
1936         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1937         case GeneratedOperandInteger: {
1938             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1939             GPRTemporary result(this, Reuse, op1);
1940             m_jit.move(op1.gpr(), result.gpr());
1941             int32Result(result.gpr(), node, op1.format());
1942             return;
1943         }
1944         case GeneratedOperandJSValue: {
1945             GPRTemporary result(this);
1946 #if USE(JSVALUE64)
1947             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1948
1949             GPRReg gpr = op1.gpr();
1950             GPRReg resultGpr = result.gpr();
1951             FPRTemporary tempFpr(this);
1952             FPRReg fpr = tempFpr.fpr();
1953
1954             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1955             JITCompiler::JumpList converted;
1956
1957             if (node->child1().useKind() == NumberUse) {
1958                 DFG_TYPE_CHECK(
1959                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1960                     m_jit.branchTest64(
1961                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1962             } else {
1963                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1964                 
1965                 DFG_TYPE_CHECK(
1966                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1967                 
1968                 // It's not a cell: so true turns into 1 and all else turns into 0.
1969                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1970                 converted.append(m_jit.jump());
1971                 
1972                 isNumber.link(&m_jit);
1973             }
1974
1975             // First, if we get here, we have a double encoded as a JSValue
1976             m_jit.move(gpr, resultGpr);
1977             unboxDouble(resultGpr, fpr);
1978
1979             silentSpillAllRegisters(resultGpr);
1980             callOperation(toInt32, resultGpr, fpr);
1981             silentFillAllRegisters(resultGpr);
1982             m_jit.exceptionCheck();
1983
1984             converted.append(m_jit.jump());
1985
1986             isInteger.link(&m_jit);
1987             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1988
1989             converted.link(&m_jit);
1990 #else
1991             Node* childNode = node->child1().node();
1992             VirtualRegister virtualRegister = childNode->virtualRegister();
1993             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1994
1995             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1996
1997             GPRReg payloadGPR = op1.payloadGPR();
1998             GPRReg resultGpr = result.gpr();
1999         
2000             JITCompiler::JumpList converted;
2001
2002             if (info.registerFormat() == DataFormatJSInt32)
2003                 m_jit.move(payloadGPR, resultGpr);
2004             else {
2005                 GPRReg tagGPR = op1.tagGPR();
2006                 FPRTemporary tempFpr(this);
2007                 FPRReg fpr = tempFpr.fpr();
2008                 FPRTemporary scratch(this);
2009
2010                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2011
2012                 if (node->child1().useKind() == NumberUse) {
2013                     DFG_TYPE_CHECK(
2014                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2015                         m_jit.branch32(
2016                             MacroAssembler::AboveOrEqual, tagGPR,
2017                             TrustedImm32(JSValue::LowestTag)));
2018                 } else {
2019                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2020                     
2021                     DFG_TYPE_CHECK(
2022                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2023                         m_jit.branchIfCell(op1.jsValueRegs()));
2024                     
2025                     // It's not a cell: so true turns into 1 and all else turns into 0.
2026                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2027                     m_jit.move(TrustedImm32(0), resultGpr);
2028                     converted.append(m_jit.jump());
2029                     
2030                     isBoolean.link(&m_jit);
2031                     m_jit.move(payloadGPR, resultGpr);
2032                     converted.append(m_jit.jump());
2033                     
2034                     isNumber.link(&m_jit);
2035                 }
2036
2037                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2038
2039                 silentSpillAllRegisters(resultGpr);
2040                 callOperation(toInt32, resultGpr, fpr);
2041                 silentFillAllRegisters(resultGpr);
2042                 m_jit.exceptionCheck();
2043
2044                 converted.append(m_jit.jump());
2045
2046                 isInteger.link(&m_jit);
2047                 m_jit.move(payloadGPR, resultGpr);
2048
2049                 converted.link(&m_jit);
2050             }
2051 #endif
2052             int32Result(resultGpr, node);
2053             return;
2054         }
2055         case GeneratedOperandTypeUnknown:
2056             RELEASE_ASSERT(!m_compileOkay);
2057             return;
2058         }
2059         RELEASE_ASSERT_NOT_REACHED();
2060         return;
2061     }
2062     
2063     default:
2064         ASSERT(!m_compileOkay);
2065         return;
2066     }
2067 }
2068
2069 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2070 {
2071     if (doesOverflow(node->arithMode())) {
2072         // We know that this sometimes produces doubles. So produce a double every
2073         // time. This at least allows subsequent code to not have weird conditionals.
2074             
2075         SpeculateInt32Operand op1(this, node->child1());
2076         FPRTemporary result(this);
2077             
2078         GPRReg inputGPR = op1.gpr();
2079         FPRReg outputFPR = result.fpr();
2080             
2081         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2082             
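        // The input is an int32 bit pattern that is really an unsigned value.
        // If the sign bit is set, the signed interpretation is off by exactly
        // 2^32, so add it back to get the correct unsigned value as a double.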
2083         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2084         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2085         positive.link(&m_jit);
2086             
2087         doubleResult(outputFPR, node);
2088         return;
2089     }
2090     
2091     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2092
2093     SpeculateInt32Operand op1(this, node->child1());
2094     GPRTemporary result(this);
2095
2096     m_jit.move(op1.gpr(), result.gpr());
2097
2098     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2099
2100     int32Result(result.gpr(), node, op1.format());
2101 }
2102
2103 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2104 {
2105     SpeculateDoubleOperand op1(this, node->child1());
2106     FPRTemporary scratch(this);
2107     GPRTemporary result(this);
2108     
2109     FPRReg valueFPR = op1.fpr();
2110     FPRReg scratchFPR = scratch.fpr();
2111     GPRReg resultGPR = result.gpr();
2112
2113     JITCompiler::JumpList failureCases;
2114     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2115     m_jit.branchConvertDoubleToInt32(
2116         valueFPR, resultGPR, failureCases, scratchFPR,
2117         shouldCheckNegativeZero(node->arithMode()));
2118     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2119
2120     int32Result(resultGPR, node);
2121 }
2122
2123 void SpeculativeJIT::compileDoubleRep(Node* node)
2124 {
2125     switch (node->child1().useKind()) {
2126     case RealNumberUse: {
2127         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2128         FPRTemporary result(this);
2129         
2130         JSValueRegs op1Regs = op1.jsValueRegs();
2131         FPRReg resultFPR = result.fpr();
2132         
2133 #if USE(JSVALUE64)
2134         GPRTemporary temp(this);
2135         GPRReg tempGPR = temp.gpr();
2136         m_jit.move(op1Regs.gpr(), tempGPR);
2137         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2138 #else
2139         FPRTemporary temp(this);
2140         FPRReg tempFPR = temp.fpr();
2141         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2142 #endif
2143         
2144         JITCompiler::Jump done = m_jit.branchDouble(
2145             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2146         
2147         DFG_TYPE_CHECK(
2148             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2149         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2150         
2151         done.link(&m_jit);
2152         
2153         doubleResult(resultFPR, node);
2154         return;
2155     }
2156     
2157     case NotCellUse:
2158     case NumberUse: {
2159         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2160
2161         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2162         if (isInt32Speculation(possibleTypes)) {
2163             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2164             FPRTemporary result(this);
2165             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2166             doubleResult(result.fpr(), node);
2167             return;
2168         }
2169
2170         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2171         FPRTemporary result(this);
2172
2173 #if USE(JSVALUE64)
2174         GPRTemporary temp(this);
2175
2176         GPRReg op1GPR = op1.gpr();
2177         GPRReg tempGPR = temp.gpr();
2178         FPRReg resultFPR = result.fpr();
2179         JITCompiler::JumpList done;
2180
2181         JITCompiler::Jump isInteger = m_jit.branch64(
2182             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2183
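        // For NotCellUse we implement ToNumber for the non-cell, non-number
        // cases inline: undefined -> NaN, null and false -> 0, true -> 1.
        // Anything that is a cell fails the type check.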
2184         if (node->child1().useKind() == NotCellUse) {
2185             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2186             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2187
2188             static const double zero = 0;
2189             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2190
2191             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2192             done.append(isNull);
2193
2194             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2195                 m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));
2196
2197             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2198             static const double one = 1;
2199             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2200             done.append(isFalse);
2201
2202             isUndefined.link(&m_jit);
2203             static const double NaN = PNaN;
2204             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2205             done.append(m_jit.jump());
2206
2207             isNumber.link(&m_jit);
2208         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2209             typeCheck(
2210                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2211                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2212         }
2213     
2214         m_jit.move(op1GPR, tempGPR);
2215         unboxDouble(tempGPR, resultFPR);
2216         done.append(m_jit.jump());
2217     
2218         isInteger.link(&m_jit);
2219         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2220         done.link(&m_jit);
2221 #else // USE(JSVALUE64) -> this is the 32_64 case
2222         FPRTemporary temp(this);
2223     
2224         GPRReg op1TagGPR = op1.tagGPR();
2225         GPRReg op1PayloadGPR = op1.payloadGPR();
2226         FPRReg tempFPR = temp.fpr();
2227         FPRReg resultFPR = result.fpr();
2228         JITCompiler::JumpList done;
2229     
2230         JITCompiler::Jump isInteger = m_jit.branch32(
2231             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2232
2233         if (node->child1().useKind() == NotCellUse) {
2234             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2235             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2236
2237             static const double zero = 0;
2238             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2239
2240             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2241             done.append(isNull);
2242
2243             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2244
2245             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2246             static const double one = 1;
2247             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2248             done.append(isFalse);
2249
2250             isUndefined.link(&m_jit);
2251             static const double NaN = PNaN;
2252             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2253             done.append(m_jit.jump());
2254
2255             isNumber.link(&m_jit);
2256         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2257             typeCheck(
2258                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2259                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2260         }
2261
2262         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2263         done.append(m_jit.jump());
2264     
2265         isInteger.link(&m_jit);
2266         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2267         done.link(&m_jit);
2268 #endif // USE(JSVALUE64)
2269     
2270         doubleResult(resultFPR, node);
2271         return;
2272     }
2273         
2274 #if USE(JSVALUE64)
2275     case Int52RepUse: {
2276         SpeculateStrictInt52Operand value(this, node->child1());
2277         FPRTemporary result(this);
2278         
2279         GPRReg valueGPR = value.gpr();
2280         FPRReg resultFPR = result.fpr();
2281
2282         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2283         
2284         doubleResult(resultFPR, node);
2285         return;
2286     }
2287 #endif // USE(JSVALUE64)
2288         
2289     default:
2290         RELEASE_ASSERT_NOT_REACHED();
2291         return;
2292     }
2293 }
2294
2295 void SpeculativeJIT::compileValueRep(Node* node)
2296 {
2297     switch (node->child1().useKind()) {
2298     case DoubleRepUse: {
2299         SpeculateDoubleOperand value(this, node->child1());
2300         JSValueRegsTemporary result(this);
2301         
2302         FPRReg valueFPR = value.fpr();
2303         JSValueRegs resultRegs = result.regs();
2304         
2305         // It's very tempting to filter the value in place to indicate that it's no longer an
2306         // impure NaN. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2307         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2308         // local was purified.
2309         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2310             m_jit.purifyNaN(valueFPR);
2311
2312         boxDouble(valueFPR, resultRegs);
2313         
2314         jsValueResult(resultRegs, node);
2315         return;
2316     }
2317         
2318 #if USE(JSVALUE64)
2319     case Int52RepUse: {
2320         SpeculateStrictInt52Operand value(this, node->child1());
2321         GPRTemporary result(this);
2322         
2323         GPRReg valueGPR = value.gpr();
2324         GPRReg resultGPR = result.gpr();
2325         
2326         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2327         
2328         jsValueResult(resultGPR, node);
2329         return;
2330     }
2331 #endif // USE(JSVALUE64)
2332         
2333     default:
2334         RELEASE_ASSERT_NOT_REACHED();
2335         return;
2336     }
2337 }
2338
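// The two helpers below clamp values being stored into clamped (Uint8Clamped)
// typed arrays to the range [0, 255]; the double variant also rounds rather
// than truncating.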
2339 static double clampDoubleToByte(double d)
2340 {
2341     d += 0.5;
2342     if (!(d > 0))
2343         d = 0;
2344     else if (d > 255)
2345         d = 255;
2346     return d;
2347 }
2348
2349 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2350 {
2351     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2352     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2353     jit.xorPtr(result, result);
2354     MacroAssembler::Jump clamped = jit.jump();
2355     tooBig.link(&jit);
2356     jit.move(JITCompiler::TrustedImm32(255), result);
2357     clamped.link(&jit);
2358     inBounds.link(&jit);
2359 }
2360
2361 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2362 {
2363     // Unordered compare so we pick up NaN
2364     static const double zero = 0;
2365     static const double byteMax = 255;
2366     static const double half = 0.5;
2367     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2368     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2369     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2370     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2371     
2372     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2373     // FIXME: This should probably just use a floating point round!
2374     // https://bugs.webkit.org/show_bug.cgi?id=72054
2375     jit.addDouble(source, scratch);
2376     jit.truncateDoubleToInt32(scratch, result);   
2377     MacroAssembler::Jump truncatedInt = jit.jump();
2378     
2379     tooSmall.link(&jit);
2380     jit.xorPtr(result, result);
2381     MacroAssembler::Jump zeroed = jit.jump();
2382     
2383     tooBig.link(&jit);
2384     jit.move(JITCompiler::TrustedImm32(255), result);
2385     
2386     truncatedInt.link(&jit);
2387     zeroed.link(&jit);
2388
2389 }
2390
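// Returns an unset Jump when no bounds check is needed: either the access is a
// PutByValAlias (already proven in bounds) or the view and index fold to
// constants that are statically in range.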
2391 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2392 {
2393     if (node->op() == PutByValAlias)
2394         return JITCompiler::Jump();
2395     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2396         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2397     if (view) {
2398         uint32_t length = view->length();
2399         Node* indexNode = m_jit.graph().child(node, 1).node();
2400         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2401             return JITCompiler::Jump();
2402         return m_jit.branch32(
2403             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2404     }
2405     return m_jit.branch32(
2406         MacroAssembler::AboveOrEqual, indexGPR,
2407         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2408 }
2409
2410 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2411 {
2412     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2413     if (!jump.isSet())
2414         return;
2415     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2416 }
2417
2418 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2419 {
2420     ASSERT(isInt(type));
2421     
2422     SpeculateCellOperand base(this, node->child1());
2423     SpeculateStrictInt32Operand property(this, node->child2());
2424     StorageOperand storage(this, node->child3());
2425
2426     GPRReg baseReg = base.gpr();
2427     GPRReg propertyReg = property.gpr();
2428     GPRReg storageReg = storage.gpr();
2429
2430     GPRTemporary result(this);
2431     GPRReg resultReg = result.gpr();
2432
2433     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2434
2435     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2436     switch (elementSize(type)) {
2437     case 1:
2438         if (isSigned(type))
2439             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2440         else
2441             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2442         break;
2443     case 2:
2444         if (isSigned(type))
2445             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2446         else
2447             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2448         break;
2449     case 4:
2450         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2451         break;
2452     default:
2453         CRASH();
2454     }
2455     if (elementSize(type) < 4 || isSigned(type)) {
2456         int32Result(resultReg, node);
2457         return;
2458     }
2459     
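    // Uint32 loads may not fit in an int32. Depending on what the node should
    // speculate, either check that the value is non-negative and return an
    // int32, return it as Int52 (64-bit only), or convert it to a double,
    // adding 2^32 when the sign bit was set.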
2460     ASSERT(elementSize(type) == 4 && !isSigned(type));
2461     if (node->shouldSpeculateInt32()) {
2462         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2463         int32Result(resultReg, node);
2464         return;
2465     }
2466     
2467 #if USE(JSVALUE64)
2468     if (node->shouldSpeculateMachineInt()) {
2469         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2470         strictInt52Result(resultReg, node);
2471         return;
2472     }
2473 #endif
2474     
2475     FPRTemporary fresult(this);
2476     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2477     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2478     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2479     positive.link(&m_jit);
2480     doubleResult(fresult.fpr(), node);
2481 }
2482
2483 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2484 {
2485     ASSERT(isInt(type));
2486     
2487     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2488     GPRReg storageReg = storage.gpr();
2489     
2490     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2491     
2492     GPRTemporary value;
2493     GPRReg valueGPR = InvalidGPRReg;
2494     
2495     if (valueUse->isConstant()) {
2496         JSValue jsValue = valueUse->asJSValue();
2497         if (!jsValue.isNumber()) {
2498             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2499             noResult(node);
2500             return;
2501         }
2502         double d = jsValue.asNumber();
2503         if (isClamped(type)) {
2504             ASSERT(elementSize(type) == 1);
2505             d = clampDoubleToByte(d);
2506         }
2507         GPRTemporary scratch(this);
2508         GPRReg scratchReg = scratch.gpr();
2509         m_jit.move(Imm32(toInt32(d)), scratchReg);
2510         value.adopt(scratch);
2511         valueGPR = scratchReg;
2512     } else {
2513         switch (valueUse.useKind()) {
2514         case Int32Use: {
2515             SpeculateInt32Operand valueOp(this, valueUse);
2516             GPRTemporary scratch(this);
2517             GPRReg scratchReg = scratch.gpr();
2518             m_jit.move(valueOp.gpr(), scratchReg);
2519             if (isClamped(type)) {
2520                 ASSERT(elementSize(type) == 1);
2521                 compileClampIntegerToByte(m_jit, scratchReg);
2522             }
2523             value.adopt(scratch);
2524             valueGPR = scratchReg;
2525             break;
2526         }
2527             
2528 #if USE(JSVALUE64)
2529         case Int52RepUse: {
2530             SpeculateStrictInt52Operand valueOp(this, valueUse);
2531             GPRTemporary scratch(this);
2532             GPRReg scratchReg = scratch.gpr();
2533             m_jit.move(valueOp.gpr(), scratchReg);
2534             if (isClamped(type)) {
2535                 ASSERT(elementSize(type) == 1);
2536                 MacroAssembler::Jump inBounds = m_jit.branch64(
2537                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2538                 MacroAssembler::Jump tooBig = m_jit.branch64(
2539                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2540                 m_jit.move(TrustedImm32(0), scratchReg);
2541                 MacroAssembler::Jump clamped = m_jit.jump();
2542                 tooBig.link(&m_jit);
2543                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2544                 clamped.link(&m_jit);
2545                 inBounds.link(&m_jit);
2546             }
2547             value.adopt(scratch);
2548             valueGPR = scratchReg;
2549             break;
2550         }
2551 #endif // USE(JSVALUE64)
2552             
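        // Doubles are converted with ToInt32-style semantics: NaN stores 0, the
        // fast path truncates, and anything the fast truncation cannot handle is
        // sent to the toInt32 operation on the slow path.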
2553         case DoubleRepUse: {
2554             if (isClamped(type)) {
2555                 ASSERT(elementSize(type) == 1);
2556                 SpeculateDoubleOperand valueOp(this, valueUse);
2557                 GPRTemporary result(this);
2558                 FPRTemporary floatScratch(this);
2559                 FPRReg fpr = valueOp.fpr();
2560                 GPRReg gpr = result.gpr();
2561                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2562                 value.adopt(result);
2563                 valueGPR = gpr;
2564             } else {
2565                 SpeculateDoubleOperand valueOp(this, valueUse);
2566                 GPRTemporary result(this);
2567                 FPRReg fpr = valueOp.fpr();
2568                 GPRReg gpr = result.gpr();
2569                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2570                 m_jit.xorPtr(gpr, gpr);
2571                 MacroAssembler::Jump fixed = m_jit.jump();
2572                 notNaN.link(&m_jit);
2573                 
2574                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2575                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2576                 
2577                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2578                 
2579                 fixed.link(&m_jit);
2580                 value.adopt(result);
2581                 valueGPR = gpr;
2582             }
2583             break;
2584         }
2585             
2586         default:
2587             RELEASE_ASSERT_NOT_REACHED();
2588             break;
2589         }
2590     }
2591     
2592     ASSERT_UNUSED(valueGPR, valueGPR != property);
2593     ASSERT(valueGPR != base);
2594     ASSERT(valueGPR != storageReg);
2595     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2596     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2597         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2598         outOfBounds = MacroAssembler::Jump();
2599     }
2600
2601     switch (elementSize(type)) {
2602     case 1:
2603         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2604         break;
2605     case 2:
2606         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2607         break;
2608     case 4:
2609         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2610         break;
2611     default:
2612         CRASH();
2613     }
2614     if (outOfBounds.isSet())
2615         outOfBounds.link(&m_jit);
2616     noResult(node);
2617 }
2618
2619 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2620 {
2621     ASSERT(isFloat(type));
2622     
2623     SpeculateCellOperand base(this, node->child1());
2624     SpeculateStrictInt32Operand property(this, node->child2());
2625     StorageOperand storage(this, node->child3());
2626
2627     GPRReg baseReg = base.gpr();
2628     GPRReg propertyReg = property.gpr();
2629     GPRReg storageReg = storage.gpr();
2630
2631     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2632
2633     FPRTemporary result(this);
2634     FPRReg resultReg = result.fpr();
2635     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2636     switch (elementSize(type)) {
2637     case 4:
2638         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2639         m_jit.convertFloatToDouble(resultReg, resultReg);
2640         break;
2641     case 8: {
2642         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2643         break;
2644     }
2645     default:
2646         RELEASE_ASSERT_NOT_REACHED();
2647     }
2648     
2649     doubleResult(resultReg, node);
2650 }
2651
2652 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2653 {
2654     ASSERT(isFloat(type));
2655     
2656     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2657     GPRReg storageReg = storage.gpr();
2658     
2659     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2660     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2661
2662     SpeculateDoubleOperand valueOp(this, valueUse);
2663     FPRTemporary scratch(this);
2664     FPRReg valueFPR = valueOp.fpr();
2665     FPRReg scratchFPR = scratch.fpr();
2666
2667     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2668     
2669     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2670     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2671         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2672         outOfBounds = MacroAssembler::Jump();
2673     }
2674     
2675     switch (elementSize(type)) {
2676     case 4: {
2677         m_jit.moveDouble(valueFPR, scratchFPR);
2678         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2679         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2680         break;
2681     }
2682     case 8:
2683         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2684         break;
2685     default:
2686         RELEASE_ASSERT_NOT_REACHED();
2687     }
2688     if (outOfBounds.isSet())
2689         outOfBounds.link(&m_jit);
2690     noResult(node);
2691 }
2692
2693 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2694 {
2695     // Check that prototype is an object.
2696     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2697     
2698     // Initialize scratchReg with the value being checked.
2699     m_jit.move(valueReg, scratchReg);
2700     
2701     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2702     MacroAssembler::Label loop(&m_jit);
2703     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2704     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2705     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
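    // Keep walking while the loaded prototype is still an object: on 64-bit the
    // chain ends when the value is no longer a cell (i.e. it is null); on 32-bit
    // the stored prototype's payload is zero at the end of the chain.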
2706 #if USE(JSVALUE64)
2707     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2708 #else
2709     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2710 #endif
2711     
2712     // No match - result is false.
2713 #if USE(JSVALUE64)
2714     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2715 #else
2716     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2717 #endif
2718     MacroAssembler::Jump putResult = m_jit.jump();
2719     
2720     isInstance.link(&m_jit);
2721 #if USE(JSVALUE64)
2722     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2723 #else
2724     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2725 #endif
2726     
2727     putResult.link(&m_jit);
2728 }
2729
2730 void SpeculativeJIT::compileInstanceOf(Node* node)
2731 {
2732     if (node->child1().useKind() == UntypedUse) {
2733         // It might not be a cell. Speculate less aggressively.
2734         // Or: it might only be used once (i.e. by us), so we get zero benefit
2735         // from speculating any more aggressively than we absolutely need to.
2736         
2737         JSValueOperand value(this, node->child1());
2738         SpeculateCellOperand prototype(this, node->child2());
2739         GPRTemporary scratch(this);
2740         GPRTemporary scratch2(this);
2741         
2742         GPRReg prototypeReg = prototype.gpr();
2743         GPRReg scratchReg = scratch.gpr();
2744         GPRReg scratch2Reg = scratch2.gpr();
2745         
2746         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2747         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2748         moveFalseTo(scratchReg);
2749
2750         MacroAssembler::Jump done = m_jit.jump();
2751         
2752         isCell.link(&m_jit);
2753         
2754         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2755         
2756         done.link(&m_jit);
2757
2758         blessedBooleanResult(scratchReg, node);
2759         return;
2760     }
2761     
2762     SpeculateCellOperand value(this, node->child1());
2763     SpeculateCellOperand prototype(this, node->child2());
2764     
2765     GPRTemporary scratch(this);
2766     GPRTemporary scratch2(this);
2767     
2768     GPRReg valueReg = value.gpr();
2769     GPRReg prototypeReg = prototype.gpr();
2770     GPRReg scratchReg = scratch.gpr();
2771     GPRReg scratch2Reg = scratch2.gpr();
2772     
2773     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2774
2775     blessedBooleanResult(scratchReg, node);
2776 }
2777
2778 void SpeculativeJIT::compileAdd(Node* node)
2779 {
2780     switch (node->binaryUseKind()) {
2781     case Int32Use: {
2782         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2783         
2784         if (node->child1()->isInt32Constant()) {
2785             int32_t imm1 = node->child1()->asInt32();
2786             SpeculateInt32Operand op2(this, node->child2());
2787             GPRTemporary result(this);
2788
2789             if (!shouldCheckOverflow(node->arithMode())) {
2790                 m_jit.move(op2.gpr(), result.gpr());
2791                 m_jit.add32(Imm32(imm1), result.gpr());
2792             } else
2793                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2794
2795             int32Result(result.gpr(), node);
2796             return;
2797         }
2798         
2799         if (node->child2()->isInt32Constant()) {
2800             SpeculateInt32Operand op1(this, node->child1());
2801             int32_t imm2 = node->child2()->asInt32();
2802             GPRTemporary result(this);
2803                 
2804             if (!shouldCheckOverflow(node->arithMode())) {
2805                 m_jit.move(op1.gpr(), result.gpr());
2806                 m_jit.add32(Imm32(imm2), result.gpr());
2807             } else
2808                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2809
2810             int32Result(result.gpr(), node);
2811             return;
2812         }
2813                 
2814         SpeculateInt32Operand op1(this, node->child1());
2815         SpeculateInt32Operand op2(this, node->child2());
2816         GPRTemporary result(this, Reuse, op1, op2);
2817
2818         GPRReg gpr1 = op1.gpr();
2819         GPRReg gpr2 = op2.gpr();
2820         GPRReg gprResult = result.gpr();
2821
2822         if (!shouldCheckOverflow(node->arithMode())) {
2823             if (gpr1 == gprResult)
2824                 m_jit.add32(gpr2, gprResult);
2825             else {
2826                 m_jit.move(gpr2, gprResult);
2827                 m_jit.add32(gpr1, gprResult);
2828             }
2829         } else {
2830             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2831                 
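            // When the destination aliases one of the operands, the add has already
            // clobbered that operand by the time we exit. Recording a SpeculationRecovery
            // lets OSR exit undo the addition and recover the original operand value.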
2832             if (gpr1 == gprResult)
2833                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2834             else if (gpr2 == gprResult)
2835                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2836             else
2837                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2838         }
2839
2840         int32Result(gprResult, node);
2841         return;
2842     }
2843         
2844 #if USE(JSVALUE64)
2845     case Int52RepUse: {
2846         ASSERT(shouldCheckOverflow(node->arithMode()));
2847         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2848
2849         // Will we need an overflow check? If we can prove that neither input can be
2850         // Int52 then the overflow check will not be necessary.
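        // (SpecInt52 covers int52 values that do not fit in int32; if neither operand can
        // be one of those, the sum of two int32-range values fits in 33 bits, comfortably
        // within int52, so a plain add64 cannot overflow the representation.)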
2851         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2852             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2853             SpeculateWhicheverInt52Operand op1(this, node->child1());
2854             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2855             GPRTemporary result(this, Reuse, op1);
2856             m_jit.move(op1.gpr(), result.gpr());
2857             m_jit.add64(op2.gpr(), result.gpr());
2858             int52Result(result.gpr(), node, op1.format());
2859             return;
2860         }
2861         
2862         SpeculateInt52Operand op1(this, node->child1());
2863         SpeculateInt52Operand op2(this, node->child2());
2864         GPRTemporary result(this);
2865         m_jit.move(op1.gpr(), result.gpr());
2866         speculationCheck(
2867             Int52Overflow, JSValueRegs(), 0,
2868             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2869         int52Result(result.gpr(), node);
2870         return;
2871     }
2872 #endif // USE(JSVALUE64)
2873     
2874     case DoubleRepUse: {
2875         SpeculateDoubleOperand op1(this, node->child1());
2876         SpeculateDoubleOperand op2(this, node->child2());
2877         FPRTemporary result(this, op1, op2);
2878
2879         FPRReg reg1 = op1.fpr();
2880         FPRReg reg2 = op2.fpr();
2881         m_jit.addDouble(reg1, reg2, result.fpr());
2882
2883         doubleResult(result.fpr(), node);
2884         return;
2885     }
2886         
2887     default:
2888         RELEASE_ASSERT_NOT_REACHED();
2889         break;
2890     }
2891 }
2892
2893 void SpeculativeJIT::compileMakeRope(Node* node)
2894 {
2895     ASSERT(node->child1().useKind() == KnownStringUse);
2896     ASSERT(node->child2().useKind() == KnownStringUse);
2897     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2898     
2899     SpeculateCellOperand op1(this, node->child1());
2900     SpeculateCellOperand op2(this, node->child2());
2901     SpeculateCellOperand op3(this, node->child3());
2902     GPRTemporary result(this);
2903     GPRTemporary allocator(this);
2904     GPRTemporary scratch(this);
2905     
2906     GPRReg opGPRs[3];
2907     unsigned numOpGPRs;
2908     opGPRs[0] = op1.gpr();
2909     opGPRs[1] = op2.gpr();
2910     if (node->child3()) {
2911         opGPRs[2] = op3.gpr();
2912         numOpGPRs = 3;
2913     } else {
2914         opGPRs[2] = InvalidGPRReg;
2915         numOpGPRs = 2;
2916     }
2917     GPRReg resultGPR = result.gpr();
2918     GPRReg allocatorGPR = allocator.gpr();
2919     GPRReg scratchGPR = scratch.gpr();
2920     
2921     JITCompiler::JumpList slowPath;
2922     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2923     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2924     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2925         
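    // A null value field marks the string as an unresolved rope; the fibers stored
    // below hold the pieces that will be resolved lazily.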
2926     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2927     for (unsigned i = 0; i < numOpGPRs; ++i)
2928         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2929     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2930         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2931     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2932     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2933     if (!ASSERT_DISABLED) {
2934         JITCompiler::Jump ok = m_jit.branch32(
2935             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2936         m_jit.abortWithReason(DFGNegativeStringLength);
2937         ok.link(&m_jit);
2938     }
2939     for (unsigned i = 1; i < numOpGPRs; ++i) {
2940         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2941         speculationCheck(
2942             Uncountable, JSValueSource(), nullptr,
2943             m_jit.branchAdd32(
2944                 JITCompiler::Overflow,
2945                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2946     }
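    // scratchGPR now holds the AND of all fibers' flags, so the Is8Bit bit survives only
    // if every fiber is 8-bit; keep just that bit for the rope's flags.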
2947     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2948     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2949     if (!ASSERT_DISABLED) {
2950         JITCompiler::Jump ok = m_jit.branch32(
2951             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2952         m_jit.abortWithReason(DFGNegativeStringLength);
2953         ok.link(&m_jit);
2954     }
2955     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2956     
2957     switch (numOpGPRs) {
2958     case 2:
2959         addSlowPathGenerator(slowPathCall(
2960             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2961         break;
2962     case 3:
2963         addSlowPathGenerator(slowPathCall(
2964             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2965         break;
2966     default:
2967         RELEASE_ASSERT_NOT_REACHED();
2968         break;
2969     }
2970         
2971     cellResult(resultGPR, node);
2972 }
2973
2974 void SpeculativeJIT::compileArithClz32(Node* node)
2975 {
2976     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2977     SpeculateInt32Operand value(this, node->child1());
2978     GPRTemporary result(this, Reuse, value);
2979     GPRReg valueReg = value.gpr();
2980     GPRReg resultReg = result.gpr();
2981     m_jit.countLeadingZeros32(valueReg, resultReg);
2982     int32Result(resultReg, node);
2983 }
2984
2985 void SpeculativeJIT::compileArithSub(Node* node)
2986 {
2987     switch (node->binaryUseKind()) {
2988     case Int32Use: {
2989         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2990         
2991         if (node->child2()->isInt32Constant()) {
2992             SpeculateInt32Operand op1(this, node->child1());
2993             int32_t imm2 = node->child2()->asInt32();
2994             GPRTemporary result(this);
2995
2996             if (!shouldCheckOverflow(node->arithMode())) {
2997                 m_jit.move(op1.gpr(), result.gpr());
2998                 m_jit.sub32(Imm32(imm2), result.gpr());
2999             } else {
3000                 GPRTemporary scratch(this);
3001                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3002             }
3003
3004             int32Result(result.gpr(), node);
3005             return;
3006         }
3007             
3008         if (node->child1()->isInt32Constant()) {
3009             int32_t imm1 = node->child1()->asInt32();
3010             SpeculateInt32Operand op2(this, node->child2());
3011             GPRTemporary result(this);
3012                 
3013             m_jit.move(Imm32(imm1), result.gpr());
3014             if (!shouldCheckOverflow(node->arithMode()))
3015                 m_jit.sub32(op2.gpr(), result.gpr());
3016             else
3017                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3018                 
3019             int32Result(result.gpr(), node);
3020             return;
3021         }
3022             
3023         SpeculateInt32Operand op1(this, node->child1());
3024         SpeculateInt32Operand op2(this, node->child2());
3025         GPRTemporary result(this);
3026
3027         if (!shouldCheckOverflow(node->arithMode())) {
3028             m_jit.move(op1.gpr(), result.gpr());
3029             m_jit.sub32(op2.gpr(), result.gpr());
3030         } else
3031             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3032
3033         int32Result(result.gpr(), node);
3034         return;
3035     }
3036         
3037 #if USE(JSVALUE64)
3038     case Int52RepUse: {
3039         ASSERT(shouldCheckOverflow(node->arithMode()));
3040         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3041
3042         // Will we need an overflow check? If we can prove that neither input can be
3043         // Int52 then the overflow check will not be necessary.
3044         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3045             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3046             SpeculateWhicheverInt52Operand op1(this, node->child1());
3047             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3048             GPRTemporary result(this, Reuse, op1);
3049             m_jit.move(op1.gpr(), result.gpr());
3050             m_jit.sub64(op2.gpr(), result.gpr());
3051             int52Result(result.gpr(), node, op1.format());
3052             return;
3053         }
3054         
3055         SpeculateInt52Operand op1(this, node->child1());
3056         SpeculateInt52Operand op2(this, node->child2());
3057         GPRTemporary result(this);
3058         m_jit.move(op1.gpr(), result.gpr());
3059         speculationCheck(
3060             Int52Overflow, JSValueRegs(), 0,
3061             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3062         int52Result(result.gpr(), node);
3063         return;
3064     }
3065 #endif // USE(JSVALUE64)
3066
3067     case DoubleRepUse: {
3068         SpeculateDoubleOperand op1(this, node->child1());
3069         SpeculateDoubleOperand op2(this, node->child2());
3070         FPRTemporary result(this, op1);
3071
3072         FPRReg reg1 = op1.fpr();
3073         FPRReg reg2 = op2.fpr();
3074         m_jit.subDouble(reg1, reg2, result.fpr());
3075
3076         doubleResult(result.fpr(), node);
3077         return;
3078     }
3079         
3080     default:
3081         RELEASE_ASSERT_NOT_REACHED();
3082         return;
3083     }
3084 }
3085
3086 void SpeculativeJIT::compileArithNegate(Node* node)
3087 {
3088     switch (node->child1().useKind()) {
3089     case Int32Use: {
3090         SpeculateInt32Operand op1(this, node->child1());
3091         GPRTemporary result(this);
3092
3093         m_jit.move(op1.gpr(), result.gpr());
3094
3095         // Note: there is no arith mode where the result is not used as a number (no
3096         // overflow check) but someone still cares about negative zero.
3097         
3098         if (!shouldCheckOverflow(node->arithMode()))
3099             m_jit.neg32(result.gpr());
3100         else if (!shouldCheckNegativeZero(node->arithMode()))
3101             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3102         else {
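            // value & 0x7fffffff is zero exactly when the value is 0 or -2^31; negating 0
            // would produce negative zero and negating -2^31 overflows, so this one test
            // covers both exit conditions.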
3103             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3104             m_jit.neg32(result.gpr());
3105         }
3106
3107         int32Result(result.gpr(), node);
3108         return;
3109     }
3110
3111 #if USE(JSVALUE64)
3112     case Int52RepUse: {
3113         ASSERT(shouldCheckOverflow(node->arithMode()));
3114         
3115         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3116             SpeculateWhicheverInt52Operand op1(this, node->child1());
3117             GPRTemporary result(this);
3118             GPRReg op1GPR = op1.gpr();
3119             GPRReg resultGPR = result.gpr();
3120             m_jit.move(op1GPR, resultGPR);
3121             m_jit.neg64(resultGPR);
3122             if (shouldCheckNegativeZero(node->arithMode())) {
3123                 speculationCheck(
3124                     NegativeZero, JSValueRegs(), 0,
3125                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3126             }
3127             int52Result(resultGPR, node, op1.format());
3128             return;
3129         }
3130         
3131         SpeculateInt52Operand op1(this, node->child1());
3132         GPRTemporary result(this);
3133         GPRReg op1GPR = op1.gpr();
3134         GPRReg resultGPR = result.gpr();
3135         m_jit.move(op1GPR, resultGPR);
3136         speculationCheck(
3137             Int52Overflow, JSValueRegs(), 0,
3138             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3139         if (shouldCheckNegativeZero(node->arithMode())) {
3140             speculationCheck(
3141                 NegativeZero, JSValueRegs(), 0,
3142                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3143         }
3144         int52Result(resultGPR, node);
3145         return;
3146     }
3147 #endif // USE(JSVALUE64)
3148         
3149     case DoubleRepUse: {
3150         SpeculateDoubleOperand op1(this, node->child1());
3151         FPRTemporary result(this);
3152         
3153         m_jit.negateDouble(op1.fpr(), result.fpr());
3154         
3155         doubleResult(result.fpr(), node);
3156         return;
3157     }
3158         
3159     default:
3160         RELEASE_ASSERT_NOT_REACHED();
3161         return;
3162     }
3163 }

3164 void SpeculativeJIT::compileArithMul(Node* node)
3165 {
3166     switch (node->binaryUseKind()) {
3167     case Int32Use: {
3168         SpeculateInt32Operand op1(this, node->child1());
3169         SpeculateInt32Operand op2(this, node->child2());
3170         GPRTemporary result(this);
3171
3172         GPRReg reg1 = op1.gpr();
3173         GPRReg reg2 = op2.gpr();
3174
3175         // We can perform truncated multiplications if we get to this point, because if the
3176         // fixup phase could not prove that it would be safe, it would have turned us into
3177         // a double multiplication.
3178         if (!shouldCheckOverflow(node->arithMode())) {
3179             m_jit.move(reg1, result.gpr());
3180             m_jit.mul32(reg2, result.gpr());
3181         } else {
3182             speculationCheck(
3183                 Overflow, JSValueRegs(), 0,
3184                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3185         }
3186             
3187         // Check for negative zero, if the users of this node care about such things.
3188         if (shouldCheckNegativeZero(node->arithMode())) {
3189             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3190             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3191             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3192             resultNonZero.link(&m_jit);
3193         }
3194
3195         int32Result(result.gpr(), node);
3196         return;
3197     }
3198     
3199 #if USE(JSVALUE64)   
3200     case Int52RepUse: {
3201         ASSERT(shouldCheckOverflow(node->arithMode()));
3202         
3203         // This is super clever. We want to do an int52 multiplication and check the
3204         // int52 overflow bit. There is no direct hardware support for this, but we do
3205         // have the ability to do an int64 multiplication and check the int64 overflow
3206         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3207         // registers, with the high 12 bits being sign-extended. We can do:
3208         //
3209         //     (a * (b << 12))
3210         //
3211         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3212         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3213         // multiplication overflows is identical to whether the 'a * b' 52-bit
3214         // multiplication overflows.
3215         //
3216         // In our nomenclature, this is:
3217         //
3218         //     strictInt52(a) * int52(b) => int52
3219         //
3220         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3221         // bits.
3222         //
3223         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3224         // we just do whatever is more convenient for op1 and have op2 do the
3225         // opposite. This ensures that we do at most one shift.
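        //
        // As a concrete illustration: a = 2^25 and b = 2^25 gives a * b = 2^50, which
        // fits in int52; the shifted product 2^25 * (2^25 << 12) = 2^62 fits in int64,
        // so no overflow is reported. With a = b = 2^26, a * b = 2^52 overflows int52,
        // and the shifted product 2^26 * (2^26 << 12) = 2^64 overflows int64 in the
        // same way, which is exactly what the branchMul64 overflow check observes.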
3226
3227         SpeculateWhicheverInt52Operand op1(this, node->child1());
3228         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3229         GPRTemporary result(this);
3230         
3231         GPRReg op1GPR = op1.gpr();
3232         GPRReg op2GPR = op2.gpr();
3233         GPRReg resultGPR = result.gpr();
3234         
3235         m_jit.move(op1GPR, resultGPR);
3236         speculationCheck(
3237             Int52Overflow, JSValueRegs(), 0,
3238             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3239         
3240         if (shouldCheckNegativeZero(node->arithMode())) {
3241             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3242                 MacroAssembler::NonZero, resultGPR);
3243             speculationCheck(
3244                 NegativeZero, JSValueRegs(), 0,
3245                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3246             speculationCheck(
3247                 NegativeZero, JSValueRegs(), 0,
3248                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3249             resultNonZero.link(&m_jit);
3250         }
3251         
3252         int52Result(resultGPR, node);
3253         return;
3254     }
3255 #endif // USE(JSVALUE64)
3256         
3257     case DoubleRepUse: {
3258         SpeculateDoubleOperand op1(this, node->child1());
3259         SpeculateDoubleOperand op2(this, node->child2());
3260         FPRTemporary result(this, op1, op2);
3261         
3262         FPRReg reg1 = op1.fpr();
3263         FPRReg reg2 = op2.fpr();
3264         
3265         m_jit.mulDouble(reg1, reg2, result.fpr());
3266         
3267         doubleResult(result.fpr(), node);
3268         return;
3269     }
3270         
3271     default:
3272         RELEASE_ASSERT_NOT_REACHED();
3273         return;
3274     }
3275 }
3276
3277 void SpeculativeJIT::compileArithDiv(Node* node)
3278 {
3279     switch (node->binaryUseKind()) {
3280     case Int32Use: {
3281 #if CPU(X86) || CPU(X86_64)
3282         SpeculateInt32Operand op1(this, node->child1());
3283         SpeculateInt32Operand op2(this, node->child2());
3284         GPRTemporary eax(this, X86Registers::eax);
3285         GPRTemporary edx(this, X86Registers::edx);
3286         GPRReg op1GPR = op1.gpr();
3287         GPRReg op2GPR = op2.gpr();
3288     
3289         GPRReg op2TempGPR;
3290         GPRReg temp;
3291         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3292             op2TempGPR = allocate();
3293             temp = op2TempGPR;
3294         } else {
3295             op2TempGPR = InvalidGPRReg;
3296             if (op1GPR == X86Registers::eax)
3297                 temp = X86Registers::edx;
3298             else
3299                 temp = X86Registers::eax;
3300         }
3301     
3302         ASSERT(temp != op1GPR);
3303         ASSERT(temp != op2GPR);
3304     
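        // temp = op2 + 1, so the unsigned comparison temp > 1 succeeds exactly when the
        // denominator is neither 0 nor -1; those two values are the only ones that need
        // the special handling below before we can safely use idiv.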
3305         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3306     
3307         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3308     
3309         JITCompiler::JumpList done;
3310         if (shouldCheckOverflow(node->arithMode())) {
3311             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3312             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3313         } else {
3314             // This is the case where we convert the result to an int after we're done, and we
3315             // already know that the denominator is either -1 or 0. So, if the denominator is
3316             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3317             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3318             // are happy to fall through to a normal division, since we're just dividing
3319             // something by negative 1.
3320         
3321             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3322             m_jit.move(TrustedImm32(0), eax.gpr());
3323             done.append(m_jit.jump());
3324         
3325             notZero.link(&m_jit);
3326             JITCompiler::Jump notNeg2ToThe31 =
3327                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3328             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3329             done.append(m_jit.jump());
3330         
3331             notNeg2ToThe31.link(&m_jit);
3332         }
3333     
3334         safeDenominator.link(&m_jit);
3335     
3336         // If the user cares about negative zero, then speculate that we're not about
3337         // to produce negative zero.
3338         if (shouldCheckNegativeZero(node->arithMode())) {
3339             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3340             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3341             numeratorNonZero.link(&m_jit);
3342         }
3343     
3344         if (op2TempGPR != InvalidGPRReg) {
3345             m_jit.move(op2GPR, op2TempGPR);
3346             op2GPR = op2TempGPR;
3347         }
3348             
3349         m_jit.move(op1GPR, eax.gpr());
3350         m_jit.assembler().cdq();
3351         m_jit.assembler().idivl_r(op2GPR);
3352             
3353         if (op2TempGPR != InvalidGPRReg)
3354             unlock(op2TempGPR);
3355
3356         // Check that there was no remainder. If there had been, then we'd be obligated to
3357         // produce a double result instead.
3358         if (shouldCheckOverflow(node->arithMode()))
3359             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3360         
3361         done.link(&m_jit);
3362         int32Result(eax.gpr(), node);
3363 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3364         SpeculateInt32Operand op1(this, node->child1());
3365         SpeculateInt32Operand op2(this, node->child2());
3366         GPRReg op1GPR = op1.gpr();
3367         GPRReg op2GPR = op2.gpr();
3368         GPRTemporary quotient(this);
3369         GPRTemporary multiplyAnswer(this);
3370
3371         // If the user cares about negative zero, then speculate that we're not about
3372         // to produce negative zero.
3373         if (shouldCheckNegativeZero(node->arithMode())) {
3374             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3375             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3376             numeratorNonZero.link(&m_jit);
3377         }
3378
3379         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3380
3381         // Check that there was no remainder. If there had been, then we'd be obligated to
3382         // produce a double result instead.
3383         if (shouldCheckOverflow(node->arithMode())) {
3384             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3385             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3386         }
3387
3388         int32Result(quotient.gpr(), node);
3389 #else
3390         RELEASE_ASSERT_NOT_REACHED();
3391 #endif
3392         break;
3393     }
3394         
3395     case DoubleRepUse: {
3396         SpeculateDoubleOperand op1(this, node->child1());
3397         SpeculateDoubleOperand op2(this, node->child2());
3398         FPRTemporary result(this, op1);
3399         
3400         FPRReg reg1 = op1.fpr();
3401         FPRReg reg2 = op2.fpr();
3402         m_jit.divDouble(reg1, reg2, result.fpr());
3403         
3404         doubleResult(result.fpr(), node);
3405         break;
3406     }
3407         
3408     default:
3409         RELEASE_ASSERT_NOT_REACHED();
3410         break;
3411     }
3412 }
3413
3414 void SpeculativeJIT::compileArithMod(Node* node)
3415 {
3416     switch (node->binaryUseKind()) {
3417     case Int32Use: {
3418         // In the fast path, the dividend value could be the final result
3419         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3420         SpeculateStrictInt32Operand op1(this, node->child1());
3421         
3422         if (node->child2()->isInt32Constant()) {
3423             int32_t divisor = node->child2()->asInt32();
3424             if (divisor > 1 && hasOneBitSet(divisor)) {
3425                 unsigned logarithm = WTF::fastLog2(divisor);
3426                 GPRReg dividendGPR = op1.gpr();
3427                 GPRTemporary result(this);
3428                 GPRReg resultGPR = result.gpr();
3429
3430                 // This is what LLVM generates. It's pretty crazy. Here's my
3431                 // attempt at understanding it.
3432                 
3433                 // First, compute either divisor - 1, or 0, depending on whether
3434                 // the dividend is negative:
3435                 //
3436                 // If dividend < 0:  resultGPR = divisor - 1
3437                 // If dividend >= 0: resultGPR = 0
3438                 m_jit.move(dividendGPR, resultGPR);
3439                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3440                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3441                 
3442                 // Add in the dividend, so that:
3443                 //
3444                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3445                 // If dividend >= 0: resultGPR = dividend
3446                 m_jit.add32(dividendGPR, resultGPR);
3447                 
3448                 // Mask so as to only get the *high* bits. This rounds down
3449                 // (towards negative infinity) resultGPR to the nearest multiple
3450                 // of divisor, so that:
3451                 //
3452                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3453                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3454                 //
3455                 // Note that this can be simplified to:
3456                 //
3457                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3458                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3459                 //
3460                 // Note that if the dividend is negative, resultGPR will be zero or negative.
3461                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3462                 // zero, because of how things are conditionalized.
3463                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3464                 
3465                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3466                 //
3467                 // resultGPR = dividendGPR - resultGPR
3468                 m_jit.neg32(resultGPR);
3469                 m_jit.add32(dividendGPR, resultGPR);
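                // Worked example (illustrative): dividend = -5, divisor = 4 (logarithm = 2).
                // rshift32(31) gives -1; urshift32(30) gives 3 (= divisor - 1); adding the
                // dividend gives -2; and32(-4) gives -4; negating and adding the dividend
                // gives -1, which is indeed -5 % 4 under truncated-division semantics.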
3470                 
3471                 if (shouldCheckNegativeZero(node->arithMode())) {
3472                     // Check that we're not about to create negative zero.
3473                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3474                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3475                     numeratorPositive.link(&m_jit);
3476                 }
3477
3478                 int32Result(resultGPR, node);
3479                 return;
3480             }
3481         }
3482         
3483 #if CPU(X86) || CPU(X86_64)
3484         if (node->child2()->isInt32Constant()) {
3485             int32_t divisor = node->child2()->asInt32();
3486             if (divisor && divisor != -1) {
3487                 GPRReg op1Gpr = op1.gpr();
3488
3489                 GPRTemporary eax(this, X86Registers::eax);
3490                 GPRTemporary edx(this, X86Registers::edx);
3491                 GPRTemporary scratch(this);
3492                 GPRReg scratchGPR = scratch.gpr();
3493
3494                 GPRReg op1SaveGPR;
3495                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3496                     op1SaveGPR = allocate();
3497                     ASSERT(op1Gpr != op1SaveGPR);
3498                     m_jit.move(op1Gpr, op1SaveGPR);
3499                 } else
3500                     op1SaveGPR = op1Gpr;
3501                 ASSERT(op1SaveGPR != X86Registers::eax);
3502                 ASSERT(op1SaveGPR != X86Registers::edx);
3503
3504                 m_jit.move(op1Gpr, eax.gpr());
3505                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3506                 m_jit.assembler().cdq();
3507                 m_jit.assembler().idivl_r(scratchGPR);
3508                 if (shouldCheckNegativeZero(node->arithMode())) {
3509                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3510                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3511                     numeratorPositive.link(&m_jit);
3512                 }
3513             
3514                 if (op1SaveGPR != op1Gpr)
3515                     unlock(op1SaveGPR);
3516
3517                 int32Result(edx.gpr(), node);
3518                 return;
3519             }
3520         }
3521 #endif
3522
3523         SpeculateInt32Operand op2(this, node->child2());
3524 #if CPU(X86) || CPU(X86_64)
3525         GPRTemporary eax(this, X86Registers::eax);
3526         GPRTemporary edx(this, X86Registers::edx);
3527         GPRReg op1GPR = op1.gpr();
3528         GPRReg op2GPR = op2.gpr();
3529     
3530         GPRReg op2TempGPR;
3531         GPRReg temp;
3532         GPRReg op1SaveGPR;
3533     
3534         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3535             op2TempGPR = allocate();
3536             temp = op2TempGPR;
3537         } else {
3538             op2TempGPR = InvalidGPRReg;
3539             if (op1GPR == X86Registers::eax)
3540                 temp = X86Registers::edx;
3541             else
3542                 temp = X86Registers::eax;
3543         }
3544     
3545         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3546             op1SaveGPR = allocate();
3547             ASSERT(op1GPR != op1SaveGPR);
3548             m_jit.move(op1GPR, op1SaveGPR);
3549         } else
3550             op1SaveGPR = op1GPR;
3551     
3552         ASSERT(temp != op1GPR);
3553         ASSERT(temp != op2GPR);
3554         ASSERT(op1SaveGPR != X86Registers::eax);
3555         ASSERT(op1SaveGPR != X86Registers::edx);
3556     
3557         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3558     
3559         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3560     
3561         JITCompiler::JumpList done;
3562         
3563         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3564         // separate case for that. But it probably doesn't matter so much.
3565         if (shouldCheckOverflow(node->arithMode())) {
3566             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3567             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3568         } else {
3569             // This is the case where we convert the result to an int after we're done, and we
3570             // already know that the denominator is either -1 or 0. So, if the denominator is
3571             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3572             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3573             // happy to fall through to a normal division, since we're just dividing something
3574             // by negative 1.
3575         
3576             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3577             m_jit.move(TrustedImm32(0), edx.gpr());
3578             done.append(m_jit.jump());
3579         
3580             notZero.link(&m_jit);
3581             JITCompiler::Jump notNeg2ToThe31 =
3582                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3583             m_jit.move(TrustedImm32(0), edx.gpr());
3584             done.append(m_jit.jump());
3585         
3586             notNeg2ToThe31.link(&m_jit);
3587         }
3588         
3589         safeDenominator.link(&m_jit);
3590             
3591         if (op2TempGPR != InvalidGPRReg) {
3592             m_jit.move(op2GPR, op2TempGPR);
3593             op2GPR = op2TempGPR;
3594         }
3595             
3596         m_jit.move(op1GPR, eax.gpr());
3597         m_jit.assembler().cdq();
3598         m_jit.assembler().idivl_r(op2GPR);
3599             
3600         if (op2TempGPR != InvalidGPRReg)
3601             unlock(op2TempGPR);
3602
3603         // Check that we're not about to create negative zero.
3604         if (shouldCheckNegativeZero(node->arithMode())) {
3605             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3606             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3607             numeratorPositive.link(&m_jit);
3608         }
3609     
3610         if (op1SaveGPR != op1GPR)
3611             unlock(op1SaveGPR);
3612             
3613         done.link(&m_jit);
3614         int32Result(edx.gpr(), node);
3615
3616 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3617         GPRTemporary temp(this);
3618         GPRTemporary quotientThenRemainder(this);
3619         GPRTemporary multiplyAnswer(this);
3620         GPRReg dividendGPR = op1.gpr();
3621         GPRReg divisorGPR = op2.gpr();
3622         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3623         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3624
3625         JITCompiler::JumpList done;
3626     
3627         if (shouldCheckOverflow(node->arithMode()))
3628             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3629         else {
3630             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3631             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3632             done.append(m_jit.jump());
3633             denominatorNotZero.link(&m_jit);
3634         }
3635
3636         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3637         // FIXME: It seems like there are cases where we don't need this? What if we have
3638         // arithMode() == Arith::Unchecked?
3639         // https://bugs.webkit.org/show_bug.cgi?id=126444
3640         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3641 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3642         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3643 #else
3644         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3645 #endif
3646
3647         // If the user cares about negative zero, then speculate that we're not about
3648         // to produce negative zero.
3649         if (shouldCheckNegativeZero(node->arithMode())) {
3650             // Check that we're not about to create negative zero.
3651             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3652             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3653             numeratorPositive.link(&m_jit);
3654         }
3655
3656         done.link(&m_jit);
3657
3658         int32Result(quotientThenRemainderGPR, node);
3659 #else // not architecture that can do integer division
3660         RELEASE_ASSERT_NOT_REACHED();
3661 #endif
3662         return;
3663     }
3664         
3665     case DoubleRepUse: {
3666         SpeculateDoubleOperand op1(this, node->child1());
3667         SpeculateDoubleOperand op2(this, node->child2());
3668         
3669         FPRReg op1FPR = op1.fpr();
3670         FPRReg op2FPR = op2.fpr();
3671         
3672         flushRegisters();
3673         
3674         FPRResult result(this);
3675         
3676         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3677         
3678         doubleResult(result.fpr(), node);
3679         return;
3680     }
3681         
3682     default:
3683         RELEASE_ASSERT_NOT_REACHED();
3684         return;
3685     }
3686 }
3687
3688 void SpeculativeJIT::compileArithRound(Node* node)
3689 {
3690     ASSERT(node->child1().useKind() == DoubleRepUse);
3691
3692     SpeculateDoubleOperand value(this, node->child1());
3693     FPRReg valueFPR = value.fpr();
3694
3695     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3696         FPRTemporary oneHalf(this);
3697         GPRTemporary roundedResultAsInt32(this);
3698         FPRReg oneHalfFPR = oneHalf.fpr();
3699         GPRReg resultGPR = roundedResultAsInt32.gpr();
3700
3701         static const double halfConstant = 0.5;
3702         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3703         m_jit.addDouble(valueFPR, oneHalfFPR);
3704
3705         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3706         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3707         int32Result(resultGPR, node);
3708         return;
3709     }
3710
3711     flushRegisters();
3712     FPRResult roundedResultAsDouble(this);
3713     FPRReg resultFPR = roundedResultAsDouble.fpr();
3714     callOperation(jsRound, resultFPR, valueFPR);
3715     m_jit.exceptionCheck();
3716     if (producesInteger(node->arithRoundingMode())) {
3717         GPRTemporary roundedResultAsInt32(this);
3718         FPRTemporary scratch(this);
3719         FPRReg scratchFPR = scratch.fpr();
3720         GPRReg resultGPR = roundedResultAsInt32.gpr();
3721         JITCompiler::JumpList failureCases;
3722         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3723         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3724
3725         int32Result(resultGPR, node);
3726     } else
3727         doubleResult(resultFPR, node);
3728 }
3729
3730 void SpeculativeJIT::compileArithSqrt(Node* node)
3731 {
3732     SpeculateDoubleOperand op1(this, node->child1());
3733     FPRReg op1FPR = op1.fpr();
3734
3735     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3736         flushRegisters();
3737         FPRResult result(this);
3738         callOperation(sqrt, result.fpr(), op1FPR);
3739         doubleResult(result.fpr(), node);
3740     } else {
3741         FPRTemporary result(this, op1);
3742         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3743         doubleResult(result.fpr(), node);
3744     }
3745 }
3746
3747 // For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
3748 // Every register is clobbered by this helper.
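// Roughly, the emitted loop computes exponentiation by squaring; a sketch in scalar form
// (assuming 0 <= yOperand <= 1000, which the guards below enforce):
//
//     double result = 1;
//     do {
//         if (yOperand & 1)
//             result *= xOperand;
//         xOperand *= xOperand;
//         yOperand >>= 1;
//     } while (yOperand);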
3749 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3750 {
3751     MacroAssembler::JumpList skipFastPath;
3752     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3753     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3754
3755     static const double oneConstant = 1.0;
3756     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3757
3758     MacroAssembler::Label startLoop(assembler.label());
3759     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3760     assembler.mulDouble(xOperand, result);
3761     exponentIsEven.link(&assembler);
3762     assembler.mulDouble(xOperand, xOperand);
3763     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3764     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3765
3766     MacroAssembler::Jump skipSlowPath = assembler.jump();
3767     skipFastPath.link(&assembler);
3768
3769     return skipSlowPath;
3770 }
3771
3772 void SpeculativeJIT::compileArithPow(Node* node)
3773 {
3774     if (node->child2().useKind() == Int32Use) {
3775         SpeculateDoubleOperand xOperand(this, node->child1());
3776         SpeculateInt32Operand yOperand(this, node->child2());
3777         FPRReg xOperandfpr = xOperand.fpr();
3778         GPRReg yOperandGpr = yOperand.gpr();
3779         FPRTemporary yOperandfpr(this);
3780
3781         flushRegisters();
3782
3783         FPRResult result(this);
3784         FPRReg resultFpr = result.fpr();
3785
3786         FPRTemporary xOperandCopy(this);
3787         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3788         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3789
3790         GPRTemporary counter(this);
3791         GPRReg counterGpr = counter.gpr();
3792         m_jit.move(yOperandGpr, counterGpr);
3793
3794         MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
3795         m_jit.convertInt32To