OSR exit fuzzing should allow us to select a static exit site
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

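// Inline-allocates a JSArray with the given structure plus a butterfly sized for at
// least BASE_VECTOR_LEN entries. For double arrays, slots past numElements are
// pre-filled with PNaN (the hole value for double storage). Allocation failures are
// handled by CallArrayAllocatorSlowPathGenerator, which calls operationNewArrayWithSize.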
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

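// Returns an unset jump unless OSR exit fuzzing is enabled. When it is enabled, this
// emits code that bumps g_numberOfOSRExitFuzzChecks and compares the new count against
// Options::fireOSRExitFuzzAt() / fireOSRExitFuzzAtOrAfter(), yielding a jump that is
// taken once the selected (static) exit site is reached.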
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

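// Registers an OSR exit for the given failure jump. If the exit fuzzer wants to fire
// at this site, the fuzz jump is folded into the same exit's jump list, so a forced
// exit takes exactly the same path as a genuine speculation failure.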
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::specialRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}

void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

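// Builds a plan describing how to silently spill the value living in the given GPR and
// how to refill it afterwards. The spill action depends on the current register format;
// the fill action rematerializes constants where possible and otherwise reloads from
// the spill slot, shifting between Int52 representations as needed.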
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

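// The caller has loaded the base cell's indexing type byte into tempGPR. This masks it
// according to the array class being checked and returns a jump that is taken when the
// indexing shape is not the one this ArrayMode expects.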
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

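// Compiles the 'in' operator. When the property is a constant atomic string, we emit a
// patchable jump plus a StructureStubInfo so that operationInOptimize can repatch the
// check; otherwise we flush registers and call operationGenericIn.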
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isBooleanConstant()) {
        bool imm = node->child1()->asBoolean();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (node->child2()->isBooleanConstant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = node->child2()->asBoolean();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes that also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}

void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
}

void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}

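// Generates code for m_block: records the block head label, resets the variable event
// stream and the abstract state, then compiles each node in order while running the
// abstract interpreter alongside so speculation decisions see up-to-date proofs. Bails
// if the state becomes contradictory or a node fails to compile.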
1413 void SpeculativeJIT::compileCurrentBlock()
1414 {
1415     ASSERT(m_compileOkay);
1416     
1417     if (!m_block)
1418         return;
1419     
1420     ASSERT(m_block->isReachable);
1421     
1422     m_jit.blockHeads()[m_block->index] = m_jit.label();
1423
1424     if (!m_block->intersectionOfCFAHasVisited) {
1425         // Don't generate code for basic blocks that are unreachable according to CFA.
1426         // But to be sure that nobody has generated a jump to this block, drop in a
1427         // breakpoint here.
1428         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1429         return;
1430     }
1431
1432     m_stream->appendAndLog(VariableEvent::reset());
1433     
1434     m_jit.jitAssertHasValidCallFrame();
1435     m_jit.jitAssertTagsInPlace();
1436     m_jit.jitAssertArgumentCountSane();
1437
1438     m_state.reset();
1439     m_state.beginBasicBlock(m_block);
1440     
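    // Tell the variable event stream where every live variable lives, and in what
    // format, at the head of this block, so OSR exit can rebuild the bytecode state
    // before the first node has executed.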
1441     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1442         int operand = m_block->variablesAtHead.operandForIndex(i);
1443         Node* node = m_block->variablesAtHead[i];
1444         if (!node)
1445             continue; // No need to record dead SetLocals.
1446         
1447         VariableAccessData* variable = node->variableAccessData();
1448         DataFormat format;
1449         if (!node->refCount())
1450             continue; // No need to record dead SetLocals.
1451         format = dataFormatFor(variable->flushFormat());
1452         m_stream->appendAndLog(
1453             VariableEvent::setLocal(
1454                 VirtualRegister(operand),
1455                 variable->machineLocal(),
1456                 format));
1457     }
1458     
1459     m_codeOriginForExitTarget = CodeOrigin();
1460     m_codeOriginForExitProfile = CodeOrigin();
1461     
1462     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1463         m_currentNode = m_block->at(m_indexInBlock);
1464         
1465         // We may have hit a contradiction that the CFA was aware of but that the JIT
1466         // didn't cause directly.
1467         if (!m_state.isValid()) {
1468             bail(DFGBailedAtTopOfBlock);
1469             return;
1470         }
1471
1472         if (ASSERT_DISABLED)
1473             m_canExit = true; // Essentially disable the assertions.
1474         else
1475             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1476         
1477         m_interpreter.startExecuting();
1478         m_jit.setForNode(m_currentNode);
1479         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1480         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1481         m_lastGeneratedNode = m_currentNode->op();
1482         
1483         ASSERT(m_currentNode->shouldGenerate());
1484         
1485         if (verboseCompilationEnabled()) {
1486             dataLogF(
1487                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1488                 (int)m_currentNode->index(),
1489                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1490             dataLog("\n");
1491         }
1492         
1493         compile(m_currentNode);
1494         
1495         if (belongsInMinifiedGraph(m_currentNode->op()))
1496             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1497         
1498 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1499         m_jit.clearRegisterAllocationOffsets();
1500 #endif
1501         
1502         if (!m_compileOkay) {
1503             bail(DFGBailedAtEndOfNode);
1504             return;
1505         }
1506         
1507         // Make sure that the abstract state is rematerialized for the next node.
1508         m_interpreter.executeEffects(m_indexInBlock);
1509     }
1510     
1511     // Perform the most basic verification that children have been used correctly.
1512     if (!ASSERT_DISABLED) {
1513         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1514             GenerationInfo& info = m_generationInfo[index];
1515             RELEASE_ASSERT(!info.alive());
1516         }
1517     }
1518 }
1519
1520 // If we are making type predictions about our arguments then
1521 // we need to check that they are correct on function entry.
1522 void SpeculativeJIT::checkArgumentTypes()
1523 {
1524     ASSERT(!m_currentNode);
1525     m_isCheckingArgumentTypes = true;
1526     m_codeOriginForExitTarget = CodeOrigin(0);
1527     m_codeOriginForExitProfile = CodeOrigin(0);
1528
1529     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1530         Node* node = m_jit.graph().m_arguments[i];
1531         if (!node) {
1532             // The argument is dead. We don't do any checks for such arguments.
1533             continue;
1534         }
1535         
1536         ASSERT(node->op() == SetArgument);
1537         ASSERT(node->shouldGenerate());
1538
1539         VariableAccessData* variableAccessData = node->variableAccessData();
1540         FlushFormat format = variableAccessData->flushFormat();
1541         
1542         if (format == FlushedJSValue)
1543             continue;
1544         
1545         VirtualRegister virtualRegister = variableAccessData->local();
1546
1547         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1548         
1549 #if USE(JSVALUE64)
1550         switch (format) {
1551         case FlushedInt32: {
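            // On 64-bit, int32s are boxed as TagTypeNumber | value, so any bit pattern
            // numerically below the tag-type-number mask cannot be a boxed int32.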
1552             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1553             break;
1554         }
1555         case FlushedBoolean: {
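            // XORing with ValueFalse maps false to 0 and true to 1; any other bits
            // left over mean the value was not a boolean.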
1556             GPRTemporary temp(this);
1557             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1558             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1559             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1560             break;
1561         }
1562         case FlushedCell: {
1563             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1564             break;
1565         }
1566         default:
1567             RELEASE_ASSERT_NOT_REACHED();
1568             break;
1569         }
1570 #else
1571         switch (format) {
1572         case FlushedInt32: {
1573             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1574             break;
1575         }
1576         case FlushedBoolean: {
1577             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1578             break;
1579         }
1580         case FlushedCell: {
1581             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1582             break;
1583         }
1584         default:
1585             RELEASE_ASSERT_NOT_REACHED();
1586             break;
1587         }
1588 #endif
1589     }
1590     m_isCheckingArgumentTypes = false;
1591 }
1592
1593 bool SpeculativeJIT::compile()
1594 {
1595     checkArgumentTypes();
1596     
1597     ASSERT(!m_currentNode);
1598     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1599         m_jit.setForBlockIndex(blockIndex);
1600         m_block = m_jit.graph().block(blockIndex);
1601         compileCurrentBlock();
1602     }
1603     linkBranches();
1604     return true;
1605 }
1606
1607 void SpeculativeJIT::createOSREntries()
1608 {
1609     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1610         BasicBlock* block = m_jit.graph().block(blockIndex);
1611         if (!block)
1612             continue;
1613         if (!block->isOSRTarget)
1614             continue;
1615         
1616         // Currently we don't have OSR entry trampolines. We could add them
1617         // here if need be.
1618         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1619     }
1620 }
1621
1622 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1623 {
1624     unsigned osrEntryIndex = 0;
1625     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1626         BasicBlock* block = m_jit.graph().block(blockIndex);
1627         if (!block)
1628             continue;
1629         if (!block->isOSRTarget)
1630             continue;
1631         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1632     }
1633     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1634 }
1635
1636 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1637 {
1638     Edge child3 = m_jit.graph().varArgChild(node, 2);
1639     Edge child4 = m_jit.graph().varArgChild(node, 3);
1640
1641     ArrayMode arrayMode = node->arrayMode();
1642     
1643     GPRReg baseReg = base.gpr();
1644     GPRReg propertyReg = property.gpr();
1645     
1646     SpeculateDoubleOperand value(this, child3);
1647
1648     FPRReg valueReg = value.fpr();
1649     
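    // NaN is the only double that compares unordered with itself, so this exits if the
    // incoming value is NaN; only real numbers may be stored through this fast path.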
1650     DFG_TYPE_CHECK(
1651         JSValueRegs(), child3, SpecFullRealNumber,
1652         m_jit.branchDouble(
1653             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1654     
1655     if (!m_compileOkay)
1656         return;
1657     
1658     StorageOperand storage(this, child4);
1659     GPRReg storageReg = storage.gpr();
1660
1661     if (node->op() == PutByValAlias) {
1662         // Store the value to the array.
1663         GPRReg propertyReg = property.gpr();
1664         FPRReg valueReg = value.fpr();
1665         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1666         
1667         noResult(m_currentNode);
1668         return;
1669     }
1670     
1671     GPRTemporary temporary;
1672     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1673
1674     MacroAssembler::Jump slowCase;
1675     
1676     if (arrayMode.isInBounds()) {
1677         speculationCheck(
1678             OutOfBounds, JSValueRegs(), 0,
1679             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1680     } else {
1681         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1682         
1683         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1684         
1685         if (!arrayMode.isOutOfBounds())
1686             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1687         
1688         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1689         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1690         
1691         inBounds.link(&m_jit);
1692     }
1693     
1694     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1695
1696     base.use();
1697     property.use();
1698     value.use();
1699     storage.use();
1700     
1701     if (arrayMode.isOutOfBounds()) {
1702         addSlowPathGenerator(
1703             slowPathCall(
1704                 slowCase, this,
1705                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1706                 NoResult, baseReg, propertyReg, valueReg));
1707     }
1708
1709     noResult(m_currentNode, UseChildrenCalledExplicitly);
1710 }
1711
1712 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1713 {
1714     SpeculateCellOperand string(this, node->child1());
1715     SpeculateStrictInt32Operand index(this, node->child2());
1716     StorageOperand storage(this, node->child3());
1717
1718     GPRReg stringReg = string.gpr();
1719     GPRReg indexReg = index.gpr();
1720     GPRReg storageReg = storage.gpr();
1721     
1722     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1723
1724     // unsigned comparison so we can filter out negative indices and indices that are too large
1725     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1726
1727     GPRTemporary scratch(this);
1728     GPRReg scratchReg = scratch.gpr();
1729
1730     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1731
1732     // Load the character into scratchReg
1733     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1734
1735     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1736     JITCompiler::Jump cont8Bit = m_jit.jump();
1737
1738     is16Bit.link(&m_jit);
1739
1740     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1741
1742     cont8Bit.link(&m_jit);
1743
1744     int32Result(scratchReg, m_currentNode);
1745 }
1746
1747 void SpeculativeJIT::compileGetByValOnString(Node* node)
1748 {
1749     SpeculateCellOperand base(this, node->child1());
1750     SpeculateStrictInt32Operand property(this, node->child2());
1751     StorageOperand storage(this, node->child3());
1752     GPRReg baseReg = base.gpr();
1753     GPRReg propertyReg = property.gpr();
1754     GPRReg storageReg = storage.gpr();
1755
1756     GPRTemporary scratch(this);
1757     GPRReg scratchReg = scratch.gpr();
1758 #if USE(JSVALUE32_64)
1759     GPRTemporary resultTag;
1760     GPRReg resultTagReg = InvalidGPRReg;
1761     if (node->arrayMode().isOutOfBounds()) {
1762         GPRTemporary realResultTag(this);
1763         resultTag.adopt(realResultTag);
1764         resultTagReg = resultTag.gpr();
1765     }
1766 #endif
1767
1768     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1769
1770     // unsigned comparison so we can filter out negative indices and indices that are too large
1771     JITCompiler::Jump outOfBounds = m_jit.branch32(
1772         MacroAssembler::AboveOrEqual, propertyReg,
1773         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1774     if (node->arrayMode().isInBounds())
1775         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1776
1777     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1778
1779     // Load the character into scratchReg
1780     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1781
1782     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1783     JITCompiler::Jump cont8Bit = m_jit.jump();
1784
1785     is16Bit.link(&m_jit);
1786
1787     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1788
1789     JITCompiler::Jump bigCharacter =
1790         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1791
1792     // 8 bit string values don't need the isASCII check.
1793     cont8Bit.link(&m_jit);
1794
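    // Char codes below 0x100 become JSString cells by indexing the VM's
    // single-character string table: scale by the pointer size and load the cached string.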
1795     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1796     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1797     m_jit.loadPtr(scratchReg, scratchReg);
1798
1799     addSlowPathGenerator(
1800         slowPathCall(
1801             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1802
1803     if (node->arrayMode().isOutOfBounds()) {
1804 #if USE(JSVALUE32_64)
1805         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1806 #endif
1807
1808         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1809         if (globalObject->stringPrototypeChainIsSane()) {
1810             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1811             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1812             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1813             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1814             // indexed properties either.
1815             // https://bugs.webkit.org/show_bug.cgi?id=144668
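            // The watchpoints below jettison this code if either prototype's structure
            // changes, i.e. if the chain ever stops being sane.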
1816             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1817             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1818             
1819 #if USE(JSVALUE64)
1820             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1821                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1822 #else
1823             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1824                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1825                 baseReg, propertyReg));
1826 #endif
1827         } else {
1828 #if USE(JSVALUE64)
1829             addSlowPathGenerator(
1830                 slowPathCall(
1831                     outOfBounds, this, operationGetByValStringInt,
1832                     scratchReg, baseReg, propertyReg));
1833 #else
1834             addSlowPathGenerator(
1835                 slowPathCall(
1836                     outOfBounds, this, operationGetByValStringInt,
1837                     resultTagReg, scratchReg, baseReg, propertyReg));
1838 #endif
1839         }
1840         
1841 #if USE(JSVALUE64)
1842         jsValueResult(scratchReg, m_currentNode);
1843 #else
1844         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1845 #endif
1846     } else
1847         cellResult(scratchReg, m_currentNode);
1848 }
1849
1850 void SpeculativeJIT::compileFromCharCode(Node* node)
1851 {
1852     SpeculateStrictInt32Operand property(this, node->child1());
1853     GPRReg propertyReg = property.gpr();
1854     GPRTemporary smallStrings(this);
1855     GPRTemporary scratch(this);
1856     GPRReg scratchReg = scratch.gpr();
1857     GPRReg smallStringsReg = smallStrings.gpr();
1858
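    // Fast path: char codes below 0xff are served from the VM's single-character
    // string table; larger codes, or a null table entry, fall back to
    // operationStringFromCharCode.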
1859     JITCompiler::JumpList slowCases;
1860     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1861     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1862     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1863
1864     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1865     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1866     cellResult(scratchReg, m_currentNode);
1867 }
1868
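// Classifies how the ValueToInt32 operand is currently represented: already an int32,
// some flavor of JSValue that still needs converting, or a format (boolean/cell) that
// proves the speculation must fail.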
1869 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1870 {
1871     VirtualRegister virtualRegister = node->virtualRegister();
1872     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1873
1874     switch (info.registerFormat()) {
1875     case DataFormatStorage:
1876         RELEASE_ASSERT_NOT_REACHED();
1877
1878     case DataFormatBoolean:
1879     case DataFormatCell:
1880         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1881         return GeneratedOperandTypeUnknown;
1882
1883     case DataFormatNone:
1884     case DataFormatJSCell:
1885     case DataFormatJS:
1886     case DataFormatJSBoolean:
1887     case DataFormatJSDouble:
1888         return GeneratedOperandJSValue;
1889
1890     case DataFormatJSInt32:
1891     case DataFormatInt32:
1892         return GeneratedOperandInteger;
1893
1894     default:
1895         RELEASE_ASSERT_NOT_REACHED();
1896         return GeneratedOperandTypeUnknown;
1897     }
1898 }
1899
1900 void SpeculativeJIT::compileValueToInt32(Node* node)
1901 {
1902     switch (node->child1().useKind()) {
1903 #if USE(JSVALUE64)
1904     case Int52RepUse: {
1905         SpeculateStrictInt52Operand op1(this, node->child1());
1906         GPRTemporary result(this, Reuse, op1);
1907         GPRReg op1GPR = op1.gpr();
1908         GPRReg resultGPR = result.gpr();
1909         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1910         int32Result(resultGPR, node, DataFormatInt32);
1911         return;
1912     }
1913 #endif // USE(JSVALUE64)
1914         
1915     case DoubleRepUse: {
1916         GPRTemporary result(this);
1917         SpeculateDoubleOperand op1(this, node->child1());
1918         FPRReg fpr = op1.fpr();
1919         GPRReg gpr = result.gpr();
1920         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1921         
1922         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1923         
1924         int32Result(gpr, node);
1925         return;
1926     }
1927     
1928     case NumberUse:
1929     case NotCellUse: {
1930         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1931         case GeneratedOperandInteger: {
1932             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1933             GPRTemporary result(this, Reuse, op1);
1934             m_jit.move(op1.gpr(), result.gpr());
1935             int32Result(result.gpr(), node, op1.format());
1936             return;
1937         }
1938         case GeneratedOperandJSValue: {
1939             GPRTemporary result(this);
1940 #if USE(JSVALUE64)
1941             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1942
1943             GPRReg gpr = op1.gpr();
1944             GPRReg resultGpr = result.gpr();
1945             FPRTemporary tempFpr(this);
1946             FPRReg fpr = tempFpr.fpr();
1947
1948             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1949             JITCompiler::JumpList converted;
1950
1951             if (node->child1().useKind() == NumberUse) {
1952                 DFG_TYPE_CHECK(
1953                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1954                     m_jit.branchTest64(
1955                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1956             } else {
1957                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1958                 
1959                 DFG_TYPE_CHECK(
1960                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1961                 
1962                 // It's not a cell: so true turns into 1 and all else turns into 0.
1963                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1964                 converted.append(m_jit.jump());
1965                 
1966                 isNumber.link(&m_jit);
1967             }
1968
1969             // First, if we get here we have a double encoded as a JSValue
1970             m_jit.move(gpr, resultGpr);
1971             unboxDouble(resultGpr, fpr);
1972
1973             silentSpillAllRegisters(resultGpr);
1974             callOperation(toInt32, resultGpr, fpr);
1975             silentFillAllRegisters(resultGpr);
1976
1977             converted.append(m_jit.jump());
1978
1979             isInteger.link(&m_jit);
1980             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1981
1982             converted.link(&m_jit);
1983 #else
1984             Node* childNode = node->child1().node();
1985             VirtualRegister virtualRegister = childNode->virtualRegister();
1986             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1987
1988             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1989
1990             GPRReg payloadGPR = op1.payloadGPR();
1991             GPRReg resultGpr = result.gpr();
1992         
1993             JITCompiler::JumpList converted;
1994
1995             if (info.registerFormat() == DataFormatJSInt32)
1996                 m_jit.move(payloadGPR, resultGpr);
1997             else {
1998                 GPRReg tagGPR = op1.tagGPR();
1999                 FPRTemporary tempFpr(this);
2000                 FPRReg fpr = tempFpr.fpr();
2001                 FPRTemporary scratch(this);
2002
2003                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2004
2005                 if (node->child1().useKind() == NumberUse) {
2006                     DFG_TYPE_CHECK(
2007                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2008                         m_jit.branch32(
2009                             MacroAssembler::AboveOrEqual, tagGPR,
2010                             TrustedImm32(JSValue::LowestTag)));
2011                 } else {
2012                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2013                     
2014                     DFG_TYPE_CHECK(
2015                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2016                         m_jit.branchIfCell(op1.jsValueRegs()));
2017                     
2018                     // It's not a cell: so true turns into 1 and all else turns into 0.
2019                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2020                     m_jit.move(TrustedImm32(0), resultGpr);
2021                     converted.append(m_jit.jump());
2022                     
2023                     isBoolean.link(&m_jit);
2024                     m_jit.move(payloadGPR, resultGpr);
2025                     converted.append(m_jit.jump());
2026                     
2027                     isNumber.link(&m_jit);
2028                 }
2029
2030                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2031
2032                 silentSpillAllRegisters(resultGpr);
2033                 callOperation(toInt32, resultGpr, fpr);
2034                 silentFillAllRegisters(resultGpr);
2035
2036                 converted.append(m_jit.jump());
2037
2038                 isInteger.link(&m_jit);
2039                 m_jit.move(payloadGPR, resultGpr);
2040
2041                 converted.link(&m_jit);
2042             }
2043 #endif
2044             int32Result(resultGpr, node);
2045             return;
2046         }
2047         case GeneratedOperandTypeUnknown:
2048             RELEASE_ASSERT(!m_compileOkay);
2049             return;
2050         }
2051         RELEASE_ASSERT_NOT_REACHED();
2052         return;
2053     }
2054     
2055     default:
2056         ASSERT(!m_compileOkay);
2057         return;
2058     }
2059 }
2060
2061 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2062 {
2063     if (doesOverflow(node->arithMode())) {
2064         // We know that this sometimes produces doubles. So produce a double every
2065         // time. This at least allows subsequent code to not have weird conditionals.
2066             
2067         SpeculateInt32Operand op1(this, node->child1());
2068         FPRTemporary result(this);
2069             
2070         GPRReg inputGPR = op1.gpr();
2071         FPRReg outputFPR = result.fpr();
2072             
2073         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2074             
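        // The input is really a uint32 sitting in an int32 register; if it looks
        // negative as a signed value, add 2^32 to recover the intended unsigned value.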
2075         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2076         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2077         positive.link(&m_jit);
2078             
2079         doubleResult(outputFPR, node);
2080         return;
2081     }
2082     
2083     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2084
2085     SpeculateInt32Operand op1(this, node->child1());
2086     GPRTemporary result(this);
2087
2088     m_jit.move(op1.gpr(), result.gpr());
2089
2090     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2091
2092     int32Result(result.gpr(), node, op1.format());
2093 }
2094
2095 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2096 {
2097     SpeculateDoubleOperand op1(this, node->child1());
2098     FPRTemporary scratch(this);
2099     GPRTemporary result(this);
2100     
2101     FPRReg valueFPR = op1.fpr();
2102     FPRReg scratchFPR = scratch.fpr();
2103     GPRReg resultGPR = result.gpr();
2104
2105     JITCompiler::JumpList failureCases;
2106     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2107     m_jit.branchConvertDoubleToInt32(
2108         valueFPR, resultGPR, failureCases, scratchFPR,
2109         shouldCheckNegativeZero(node->arithMode()));
2110     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2111
2112     int32Result(resultGPR, node);
2113 }
2114
2115 void SpeculativeJIT::compileDoubleRep(Node* node)
2116 {
2117     switch (node->child1().useKind()) {
2118     case RealNumberUse: {
2119         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2120         FPRTemporary result(this);
2121         
2122         JSValueRegs op1Regs = op1.jsValueRegs();
2123         FPRReg resultFPR = result.fpr();
2124         
2125 #if USE(JSVALUE64)
2126         GPRTemporary temp(this);
2127         GPRReg tempGPR = temp.gpr();
2128         m_jit.move(op1Regs.gpr(), tempGPR);
2129         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2130 #else
2131         FPRTemporary temp(this);
2132         FPRReg tempFPR = temp.fpr();
2133         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2134 #endif
2135         
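        // Any JSValue that is not a boxed double (e.g. a boxed int32) unboxes to a NaN
        // bit pattern, so the self-comparison below succeeds only for genuine doubles;
        // otherwise we type-check for int32 and convert.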
2136         JITCompiler::Jump done = m_jit.branchDouble(
2137             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2138         
2139         DFG_TYPE_CHECK(
2140             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2141         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2142         
2143         done.link(&m_jit);
2144         
2145         doubleResult(resultFPR, node);
2146         return;
2147     }
2148     
2149     case NotCellUse:
2150     case NumberUse: {
2151         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2152
2153         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2154         if (isInt32Speculation(possibleTypes)) {
2155             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2156             FPRTemporary result(this);
2157             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2158             doubleResult(result.fpr(), node);
2159             return;
2160         }
2161
2162         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2163         FPRTemporary result(this);
2164
2165 #if USE(JSVALUE64)
2166         GPRTemporary temp(this);
2167
2168         GPRReg op1GPR = op1.gpr();
2169         GPRReg tempGPR = temp.gpr();
2170         FPRReg resultFPR = result.fpr();
2171         JITCompiler::JumpList done;
2172
2173         JITCompiler::Jump isInteger = m_jit.branch64(
2174             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2175
2176         if (node->child1().useKind() == NotCellUse) {
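            // Non-cell, non-number values convert as follows: null and false become 0,
            // true becomes 1, and undefined becomes NaN; numbers fall through to the
            // generic unboxing below.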
2177             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2178             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2179
2180             static const double zero = 0;
2181             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2182
2183             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2184             done.append(isNull);
2185
            // At this point the value is not a number, undefined, or null, so the only
            // non-cell possibility left is a boolean, which has TagBitBool set; exit if
            // that bit is clear.
2186             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2187                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2188
2189             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2190             static const double one = 1;
2191             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2192             done.append(isFalse);
2193
2194             isUndefined.link(&m_jit);
2195             static const double NaN = PNaN;
2196             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2197             done.append(m_jit.jump());
2198
2199             isNumber.link(&m_jit);
2200         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2201             typeCheck(
2202                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2203                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2204         }
2205     
2206         m_jit.move(op1GPR, tempGPR);
2207         unboxDouble(tempGPR, resultFPR);
2208         done.append(m_jit.jump());
2209     
2210         isInteger.link(&m_jit);
2211         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2212         done.link(&m_jit);
2213 #else // USE(JSVALUE64) -> this is the 32_64 case
2214         FPRTemporary temp(this);
2215     
2216         GPRReg op1TagGPR = op1.tagGPR();
2217         GPRReg op1PayloadGPR = op1.payloadGPR();
2218         FPRReg tempFPR = temp.fpr();
2219         FPRReg resultFPR = result.fpr();
2220         JITCompiler::JumpList done;
2221     
2222         JITCompiler::Jump isInteger = m_jit.branch32(
2223             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2224
2225         if (node->child1().useKind() == NotCellUse) {
2226             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2227             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2228
2229             static const double zero = 0;
2230             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2231
2232             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2233             done.append(isNull);
2234
2235             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2236
2237             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2238             static const double one = 1;
2239             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2240             done.append(isFalse);
2241
2242             isUndefined.link(&m_jit);
2243             static const double NaN = PNaN;
2244             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2245             done.append(m_jit.jump());
2246
2247             isNumber.link(&m_jit);
2248         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2249             typeCheck(
2250                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2251                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2252         }
2253
2254         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2255         done.append(m_jit.jump());
2256     
2257         isInteger.link(&m_jit);
2258         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2259         done.link(&m_jit);
2260 #endif // USE(JSVALUE64)
2261     
2262         doubleResult(resultFPR, node);
2263         return;
2264     }
2265         
2266 #if USE(JSVALUE64)
2267     case Int52RepUse: {
2268         SpeculateStrictInt52Operand value(this, node->child1());
2269         FPRTemporary result(this);
2270         
2271         GPRReg valueGPR = value.gpr();
2272         FPRReg resultFPR = result.fpr();
2273
2274         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2275         
2276         doubleResult(resultFPR, node);
2277         return;
2278     }
2279 #endif // USE(JSVALUE64)
2280         
2281     default:
2282         RELEASE_ASSERT_NOT_REACHED();
2283         return;
2284     }
2285 }
2286
2287 void SpeculativeJIT::compileValueRep(Node* node)
2288 {
2289     switch (node->child1().useKind()) {
2290     case DoubleRepUse: {
2291         SpeculateDoubleOperand value(this, node->child1());
2292         JSValueRegsTemporary result(this);
2293         
2294         FPRReg valueFPR = value.fpr();
2295         JSValueRegs resultRegs = result.regs();
2296         
2297         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2298         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2299         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2300         // local was purified.
2301         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2302             m_jit.purifyNaN(valueFPR);
2303
2304         boxDouble(valueFPR, resultRegs);
2305         
2306         jsValueResult(resultRegs, node);
2307         return;
2308     }
2309         
2310 #if USE(JSVALUE64)
2311     case Int52RepUse: {
2312         SpeculateStrictInt52Operand value(this, node->child1());
2313         GPRTemporary result(this);
2314         
2315         GPRReg valueGPR = value.gpr();
2316         GPRReg resultGPR = result.gpr();
2317         
2318         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2319         
2320         jsValueResult(resultGPR, node);
2321         return;
2322     }
2323 #endif // USE(JSVALUE64)
2324         
2325     default:
2326         RELEASE_ASSERT_NOT_REACHED();
2327         return;
2328     }
2329 }
2330
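// Clamp helper for Uint8ClampedArray stores of constant values: add 0.5 so the later
// truncation rounds to the nearest integer (ties round up), map NaN and negatives to 0,
// and cap at 255.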
2331 static double clampDoubleToByte(double d)
2332 {
2333     d += 0.5;
2334     if (!(d > 0))
2335         d = 0;
2336     else if (d > 255)
2337         d = 255;
2338     return d;
2339 }
2340
2341 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2342 {
2343     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2344     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2345     jit.xorPtr(result, result);
2346     MacroAssembler::Jump clamped = jit.jump();
2347     tooBig.link(&jit);
2348     jit.move(JITCompiler::TrustedImm32(255), result);
2349     clamped.link(&jit);
2350     inBounds.link(&jit);
2351 }
2352
2353 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2354 {
2355     // Unordered compare so we pick up NaN
2356     static const double zero = 0;
2357     static const double byteMax = 255;
2358     static const double half = 0.5;
2359     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2360     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2361     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2362     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2363     
2364     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2365     // FIXME: This should probably just use a floating point round!
2366     // https://bugs.webkit.org/show_bug.cgi?id=72054
2367     jit.addDouble(source, scratch);
2368     jit.truncateDoubleToInt32(scratch, result);   
2369     MacroAssembler::Jump truncatedInt = jit.jump();
2370     
2371     tooSmall.link(&jit);
2372     jit.xorPtr(result, result);
2373     MacroAssembler::Jump zeroed = jit.jump();
2374     
2375     tooBig.link(&jit);
2376     jit.move(JITCompiler::TrustedImm32(255), result);
2377     
2378     truncatedInt.link(&jit);
2379     zeroed.link(&jit);
2380
2381 }
2382
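// Returns an unset Jump when the bounds check can be elided: PutByValAlias was already
// checked, and a constant index into a view with a statically known length needs no
// runtime test. Otherwise the returned jump is taken on an out-of-bounds index.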
2383 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2384 {
2385     if (node->op() == PutByValAlias)
2386         return JITCompiler::Jump();
2387     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2388         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2389     if (view) {
2390         uint32_t length = view->length();
2391         Node* indexNode = m_jit.graph().child(node, 1).node();
2392         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2393             return JITCompiler::Jump();
2394         return m_jit.branch32(
2395             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2396     }
2397     return m_jit.branch32(
2398         MacroAssembler::AboveOrEqual, indexGPR,
2399         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2400 }
2401
2402 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2403 {
2404     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2405     if (!jump.isSet())
2406         return;
2407     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2408 }
2409
2410 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2411 {
2412     ASSERT(isInt(type));
2413     
2414     SpeculateCellOperand base(this, node->child1());
2415     SpeculateStrictInt32Operand property(this, node->child2());
2416     StorageOperand storage(this, node->child3());
2417
2418     GPRReg baseReg = base.gpr();
2419     GPRReg propertyReg = property.gpr();
2420     GPRReg storageReg = storage.gpr();
2421
2422     GPRTemporary result(this);
2423     GPRReg resultReg = result.gpr();
2424
2425     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2426
2427     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2428     switch (elementSize(type)) {
2429     case 1:
2430         if (isSigned(type))
2431             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2432         else
2433             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2434         break;
2435     case 2:
2436         if (isSigned(type))
2437             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2438         else
2439             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2440         break;
2441     case 4:
2442         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2443         break;
2444     default:
2445         CRASH();
2446     }
2447     if (elementSize(type) < 4 || isSigned(type)) {
2448         int32Result(resultReg, node);
2449         return;
2450     }
2451     
2452     ASSERT(elementSize(type) == 4 && !isSigned(type));
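    // A uint32 load may not fit in an int32. If int32 speculation holds we just exit on
    // results with the sign bit set; on 64-bit we can widen to Int52; otherwise fall
    // back to a double, adding 2^32 when the value looked negative.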
2453     if (node->shouldSpeculateInt32()) {
2454         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2455         int32Result(resultReg, node);
2456         return;
2457     }
2458     
2459 #if USE(JSVALUE64)
2460     if (node->shouldSpeculateMachineInt()) {
2461         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2462         strictInt52Result(resultReg, node);
2463         return;
2464     }
2465 #endif
2466     
2467     FPRTemporary fresult(this);
2468     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2469     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2470     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2471     positive.link(&m_jit);
2472     doubleResult(fresult.fpr(), node);
2473 }
2474
2475 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2476 {
2477     ASSERT(isInt(type));
2478     
2479     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2480     GPRReg storageReg = storage.gpr();
2481     
2482     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2483     
2484     GPRTemporary value;
2485     GPRReg valueGPR = InvalidGPRReg;
2486     
2487     if (valueUse->isConstant()) {
2488         JSValue jsValue = valueUse->asJSValue();
2489         if (!jsValue.isNumber()) {
2490             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2491             noResult(node);
2492             return;
2493         }
2494         double d = jsValue.asNumber();
2495         if (isClamped(type)) {
2496             ASSERT(elementSize(type) == 1);
2497             d = clampDoubleToByte(d);
2498         }
2499         GPRTemporary scratch(this);
2500         GPRReg scratchReg = scratch.gpr();
2501         m_jit.move(Imm32(toInt32(d)), scratchReg);
2502         value.adopt(scratch);
2503         valueGPR = scratchReg;
2504     } else {
2505         switch (valueUse.useKind()) {
2506         case Int32Use: {
2507             SpeculateInt32Operand valueOp(this, valueUse);
2508             GPRTemporary scratch(this);
2509             GPRReg scratchReg = scratch.gpr();
2510             m_jit.move(valueOp.gpr(), scratchReg);
2511             if (isClamped(type)) {
2512                 ASSERT(elementSize(type) == 1);
2513                 compileClampIntegerToByte(m_jit, scratchReg);
2514             }
2515             value.adopt(scratch);
2516             valueGPR = scratchReg;
2517             break;
2518         }
2519             
2520 #if USE(JSVALUE64)
2521         case Int52RepUse: {
2522             SpeculateStrictInt52Operand valueOp(this, valueUse);
2523             GPRTemporary scratch(this);
2524             GPRReg scratchReg = scratch.gpr();
2525             m_jit.move(valueOp.gpr(), scratchReg);
2526             if (isClamped(type)) {
2527                 ASSERT(elementSize(type) == 1);
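                // Clamp the 52-bit value to a byte: values in [0, 0xff] pass through,
                // anything greater becomes 255, and negatives (which fail the unsigned
                // BelowOrEqual test) become 0.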
2528                 MacroAssembler::Jump inBounds = m_jit.branch64(
2529                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2530                 MacroAssembler::Jump tooBig = m_jit.branch64(
2531                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2532                 m_jit.move(TrustedImm32(0), scratchReg);
2533                 MacroAssembler::Jump clamped = m_jit.jump();
2534                 tooBig.link(&m_jit);
2535                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2536                 clamped.link(&m_jit);
2537                 inBounds.link(&m_jit);
2538             }
2539             value.adopt(scratch);
2540             valueGPR = scratchReg;
2541             break;
2542         }
2543 #endif // USE(JSVALUE64)
2544             
2545         case DoubleRepUse: {
2546             if (isClamped(type)) {
2547                 ASSERT(elementSize(type) == 1);
2548                 SpeculateDoubleOperand valueOp(this, valueUse);
2549                 GPRTemporary result(this);
2550                 FPRTemporary floatScratch(this);
2551                 FPRReg fpr = valueOp.fpr();
2552                 GPRReg gpr = result.gpr();
2553                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2554                 value.adopt(result);
2555                 valueGPR = gpr;
2556             } else {
2557                 SpeculateDoubleOperand valueOp(this, valueUse);
2558                 GPRTemporary result(this);
2559                 FPRReg fpr = valueOp.fpr();
2560                 GPRReg gpr = result.gpr();
2561                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2562                 m_jit.xorPtr(gpr, gpr);
2563                 MacroAssembler::Jump fixed = m_jit.jump();
2564                 notNaN.link(&m_jit);
2565                 
2566                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2567                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2568                 
2569                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2570                 
2571                 fixed.link(&m_jit);
2572                 value.adopt(result);
2573                 valueGPR = gpr;
2574             }
2575             break;
2576         }
2577             
2578         default:
2579             RELEASE_ASSERT_NOT_REACHED();
2580             break;
2581         }
2582     }
2583     
2584     ASSERT_UNUSED(valueGPR, valueGPR != property);
2585     ASSERT(valueGPR != base);
2586     ASSERT(valueGPR != storageReg);
2587     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2588     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2589         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2590         outOfBounds = MacroAssembler::Jump();
2591     }
2592
2593     switch (elementSize(type)) {
2594     case 1:
2595         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2596         break;
2597     case 2:
2598         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2599         break;
2600     case 4:
2601         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2602         break;
2603     default:
2604         CRASH();
2605     }
2606     if (outOfBounds.isSet())
2607         outOfBounds.link(&m_jit);
2608     noResult(node);
2609 }
2610
2611 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2612 {
2613     ASSERT(isFloat(type));
2614     
2615     SpeculateCellOperand base(this, node->child1());
2616     SpeculateStrictInt32Operand property(this, node->child2());
2617     StorageOperand storage(this, node->child3());
2618
2619     GPRReg baseReg = base.gpr();
2620     GPRReg propertyReg = property.gpr();
2621     GPRReg storageReg = storage.gpr();
2622
2623     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2624
2625     FPRTemporary result(this);
2626     FPRReg resultReg = result.fpr();
2627     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2628     switch (elementSize(type)) {
2629     case 4:
2630         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2631         m_jit.convertFloatToDouble(resultReg, resultReg);
2632         break;
2633     case 8: {
2634         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2635         break;
2636     }
2637     default:
2638         RELEASE_ASSERT_NOT_REACHED();
2639     }
2640     
2641     doubleResult(resultReg, node);
2642 }
2643
2644 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2645 {
2646     ASSERT(isFloat(type));
2647     
2648     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2649     GPRReg storageReg = storage.gpr();
2650     
2651     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2652     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2653
2654     SpeculateDoubleOperand valueOp(this, valueUse);
2655     FPRTemporary scratch(this);
2656     FPRReg valueFPR = valueOp.fpr();
2657     FPRReg scratchFPR = scratch.fpr();
2658
2659     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2660     
2661     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2662     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2663         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2664         outOfBounds = MacroAssembler::Jump();
2665     }
2666     
2667     switch (elementSize(type)) {
2668     case 4: {
2669         m_jit.moveDouble(valueFPR, scratchFPR);
2670         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2671         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2672         break;
2673     }
2674     case 8:
2675         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2676         break;
2677     default:
2678         RELEASE_ASSERT_NOT_REACHED();
2679     }
2680     if (outOfBounds.isSet())
2681         outOfBounds.link(&m_jit);
2682     noResult(node);
2683 }
2684
2685 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2686 {
2687     // Check that prototype is an object.
2688     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2689     
2690     // Initialize scratchReg with the value being checked.
2691     m_jit.move(valueReg, scratchReg);
2692     
2693     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2694     MacroAssembler::Label loop(&m_jit);
2695     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2696     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2697     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
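    // Keep walking while the loaded prototype is still a cell; the chain terminates at
    // null, which is not a cell (and has a zero payload on 32-bit).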
2698 #if USE(JSVALUE64)
2699     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2700 #else
2701     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2702 #endif
2703     
2704     // No match - result is false.
2705 #if USE(JSVALUE64)
2706     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2707 #else
2708     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2709 #endif
2710     MacroAssembler::Jump putResult = m_jit.jump();
2711     
2712     isInstance.link(&m_jit);
2713 #if USE(JSVALUE64)
2714     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2715 #else
2716     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2717 #endif
2718     
2719     putResult.link(&m_jit);
2720 }
2721
2722 void SpeculativeJIT::compileInstanceOf(Node* node)
2723 {
2724     if (node->child1().useKind() == UntypedUse) {
2725         // It might not be a cell. Speculate less aggressively.
2726         // Or: it might only be used once (i.e. by us), so we get zero benefit
2727         // from speculating any more aggressively than we absolutely need to.
2728         
2729         JSValueOperand value(this, node->child1());
2730         SpeculateCellOperand prototype(this, node->child2());
2731         GPRTemporary scratch(this);
2732         GPRTemporary scratch2(this);
2733         
2734         GPRReg prototypeReg = prototype.gpr();
2735         GPRReg scratchReg = scratch.gpr();
2736         GPRReg scratch2Reg = scratch2.gpr();
2737         
2738         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2739         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2740         moveFalseTo(scratchReg);
2741
2742         MacroAssembler::Jump done = m_jit.jump();
2743         
2744         isCell.link(&m_jit);
2745         
2746         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2747         
2748         done.link(&m_jit);
2749
2750         blessedBooleanResult(scratchReg, node);
2751         return;
2752     }
2753     
2754     SpeculateCellOperand value(this, node->child1());
2755     SpeculateCellOperand prototype(this, node->child2());
2756     
2757     GPRTemporary scratch(this);
2758     GPRTemporary scratch2(this);
2759     
2760     GPRReg valueReg = value.gpr();
2761     GPRReg prototypeReg = prototype.gpr();
2762     GPRReg scratchReg = scratch.gpr();
2763     GPRReg scratch2Reg = scratch2.gpr();
2764     
2765     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2766
2767     blessedBooleanResult(scratchReg, node);
2768 }
2769
2770 void SpeculativeJIT::compileAdd(Node* node)
2771 {
2772     switch (node->binaryUseKind()) {
2773     case Int32Use: {
2774         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2775         
2776         if (node->child1()->isInt32Constant()) {
2777             int32_t imm1 = node->child1()->asInt32();
2778             SpeculateInt32Operand op2(this, node->child2());
2779             GPRTemporary result(this);
2780
2781             if (!shouldCheckOverflow(node->arithMode())) {
2782                 m_jit.move(op2.gpr(), result.gpr());
2783                 m_jit.add32(Imm32(imm1), result.gpr());
2784             } else
2785                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2786
2787             int32Result(result.gpr(), node);
2788             return;
2789         }
2790         
2791         if (node->child2()->isInt32Constant()) {
2792             SpeculateInt32Operand op1(this, node->child1());
2793             int32_t imm2 = node->child2()->asInt32();
2794             GPRTemporary result(this);
2795                 
2796             if (!shouldCheckOverflow(node->arithMode())) {
2797                 m_jit.move(op1.gpr(), result.gpr());
2798                 m_jit.add32(Imm32(imm2), result.gpr());
2799             } else
2800                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2801
2802             int32Result(result.gpr(), node);
2803             return;
2804         }
2805                 
2806         SpeculateInt32Operand op1(this, node->child1());
2807         SpeculateInt32Operand op2(this, node->child2());
2808         GPRTemporary result(this, Reuse, op1, op2);
2809
2810         GPRReg gpr1 = op1.gpr();
2811         GPRReg gpr2 = op2.gpr();
2812         GPRReg gprResult = result.gpr();
2813
2814         if (!shouldCheckOverflow(node->arithMode())) {
2815             if (gpr1 == gprResult)
2816                 m_jit.add32(gpr2, gprResult);
2817             else {
2818                 m_jit.move(gpr2, gprResult);
2819                 m_jit.add32(gpr1, gprResult);
2820             }
2821         } else {
2822             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2823                 
2824             if (gpr1 == gprResult)
2825                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2826             else if (gpr2 == gprResult)
2827                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2828             else
2829                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2830         }
2831
2832         int32Result(gprResult, node);
2833         return;
2834     }
2835         
2836 #if USE(JSVALUE64)
2837     case Int52RepUse: {
2838         ASSERT(shouldCheckOverflow(node->arithMode()));
2839         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2840
2841         // Will we need an overflow check? If we can prove that neither input can be
2842         // Int52 then the overflow check will not be necessary.
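             // (If neither child can be a full Int52 then both provably fit in int32, so the
             // sum needs at most 33 bits and can never overflow the 52-bit range.)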
2843         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2844             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2845             SpeculateWhicheverInt52Operand op1(this, node->child1());
2846             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2847             GPRTemporary result(this, Reuse, op1);
2848             m_jit.move(op1.gpr(), result.gpr());
2849             m_jit.add64(op2.gpr(), result.gpr());
2850             int52Result(result.gpr(), node, op1.format());
2851             return;
2852         }
2853         
2854         SpeculateInt52Operand op1(this, node->child1());
2855         SpeculateInt52Operand op2(this, node->child2());
2856         GPRTemporary result(this);
2857         m_jit.move(op1.gpr(), result.gpr());
2858         speculationCheck(
2859             Int52Overflow, JSValueRegs(), 0,
2860             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2861         int52Result(result.gpr(), node);
2862         return;
2863     }
2864 #endif // USE(JSVALUE64)
2865     
2866     case DoubleRepUse: {
2867         SpeculateDoubleOperand op1(this, node->child1());
2868         SpeculateDoubleOperand op2(this, node->child2());
2869         FPRTemporary result(this, op1, op2);
2870
2871         FPRReg reg1 = op1.fpr();
2872         FPRReg reg2 = op2.fpr();
2873         m_jit.addDouble(reg1, reg2, result.fpr());
2874
2875         doubleResult(result.fpr(), node);
2876         return;
2877     }
2878         
2879     default:
2880         RELEASE_ASSERT_NOT_REACHED();
2881         break;
2882     }
2883 }
2884
2885 void SpeculativeJIT::compileMakeRope(Node* node)
2886 {
2887     ASSERT(node->child1().useKind() == KnownStringUse);
2888     ASSERT(node->child2().useKind() == KnownStringUse);
2889     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2890     
2891     SpeculateCellOperand op1(this, node->child1());
2892     SpeculateCellOperand op2(this, node->child2());
2893     SpeculateCellOperand op3(this, node->child3());
2894     GPRTemporary result(this);
2895     GPRTemporary allocator(this);
2896     GPRTemporary scratch(this);
2897     
2898     GPRReg opGPRs[3];
2899     unsigned numOpGPRs;
2900     opGPRs[0] = op1.gpr();
2901     opGPRs[1] = op2.gpr();
2902     if (node->child3()) {
2903         opGPRs[2] = op3.gpr();
2904         numOpGPRs = 3;
2905     } else {
2906         opGPRs[2] = InvalidGPRReg;
2907         numOpGPRs = 2;
2908     }
2909     GPRReg resultGPR = result.gpr();
2910     GPRReg allocatorGPR = allocator.gpr();
2911     GPRReg scratchGPR = scratch.gpr();
2912     
2913     JITCompiler::JumpList slowPath;
2914     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2915     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2916     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2917         
2918     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2919     for (unsigned i = 0; i < numOpGPRs; ++i)
2920         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2921     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2922         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
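         // Derive the rope's flags and length from its fibers: the fibers' flags are ANDed
         // together so that Is8Bit survives only if every fiber is 8-bit, and the lengths are
         // summed with a speculation check that the total stays within 32 bits.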
2923     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2924     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2925     if (!ASSERT_DISABLED) {
2926         JITCompiler::Jump ok = m_jit.branch32(
2927             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2928         m_jit.abortWithReason(DFGNegativeStringLength);
2929         ok.link(&m_jit);
2930     }
2931     for (unsigned i = 1; i < numOpGPRs; ++i) {
2932         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2933         speculationCheck(
2934             Uncountable, JSValueSource(), nullptr,
2935             m_jit.branchAdd32(
2936                 JITCompiler::Overflow,
2937                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2938     }
2939     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2940     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2941     if (!ASSERT_DISABLED) {
2942         JITCompiler::Jump ok = m_jit.branch32(
2943             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2944         m_jit.abortWithReason(DFGNegativeStringLength);
2945         ok.link(&m_jit);
2946     }
2947     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2948     
2949     switch (numOpGPRs) {
2950     case 2:
2951         addSlowPathGenerator(slowPathCall(
2952             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2953         break;
2954     case 3:
2955         addSlowPathGenerator(slowPathCall(
2956             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2957         break;
2958     default:
2959         RELEASE_ASSERT_NOT_REACHED();
2960         break;
2961     }
2962         
2963     cellResult(resultGPR, node);
2964 }
2965
2966 void SpeculativeJIT::compileArithClz32(Node* node)
2967 {
2968     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
2969     SpeculateInt32Operand value(this, node->child1());
2970     GPRTemporary result(this, Reuse, value);
2971     GPRReg valueReg = value.gpr();
2972     GPRReg resultReg = result.gpr();
2973     m_jit.countLeadingZeros32(valueReg, resultReg);
2974     int32Result(resultReg, node);
2975 }
2976
2977 void SpeculativeJIT::compileArithSub(Node* node)
2978 {
2979     switch (node->binaryUseKind()) {
2980     case Int32Use: {
2981         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2982         
2983         if (node->child2()->isInt32Constant()) {
2984             SpeculateInt32Operand op1(this, node->child1());
2985             int32_t imm2 = node->child2()->asInt32();
2986             GPRTemporary result(this);
2987
2988             if (!shouldCheckOverflow(node->arithMode())) {
2989                 m_jit.move(op1.gpr(), result.gpr());
2990                 m_jit.sub32(Imm32(imm2), result.gpr());
2991             } else {
2992                 GPRTemporary scratch(this);
2993                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2994             }
2995
2996             int32Result(result.gpr(), node);
2997             return;
2998         }
2999             
3000         if (node->child1()->isInt32Constant()) {
3001             int32_t imm1 = node->child1()->asInt32();
3002             SpeculateInt32Operand op2(this, node->child2());
3003             GPRTemporary result(this);
3004                 
3005             m_jit.move(Imm32(imm1), result.gpr());
3006             if (!shouldCheckOverflow(node->arithMode()))
3007                 m_jit.sub32(op2.gpr(), result.gpr());
3008             else
3009                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3010                 
3011             int32Result(result.gpr(), node);
3012             return;
3013         }
3014             
3015         SpeculateInt32Operand op1(this, node->child1());
3016         SpeculateInt32Operand op2(this, node->child2());
3017         GPRTemporary result(this);
3018
3019         if (!shouldCheckOverflow(node->arithMode())) {
3020             m_jit.move(op1.gpr(), result.gpr());
3021             m_jit.sub32(op2.gpr(), result.gpr());
3022         } else
3023             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3024
3025         int32Result(result.gpr(), node);
3026         return;
3027     }
3028         
3029 #if USE(JSVALUE64)
3030     case Int52RepUse: {
3031         ASSERT(shouldCheckOverflow(node->arithMode()));
3032         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3033
3034         // Will we need an overflow check? If we can prove that neither input can be
3035         // Int52 then the overflow check will not be necessary.
3036         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3037             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3038             SpeculateWhicheverInt52Operand op1(this, node->child1());
3039             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3040             GPRTemporary result(this, Reuse, op1);
3041             m_jit.move(op1.gpr(), result.gpr());
3042             m_jit.sub64(op2.gpr(), result.gpr());
3043             int52Result(result.gpr(), node, op1.format());
3044             return;
3045         }
3046         
3047         SpeculateInt52Operand op1(this, node->child1());
3048         SpeculateInt52Operand op2(this, node->child2());
3049         GPRTemporary result(this);
3050         m_jit.move(op1.gpr(), result.gpr());
3051         speculationCheck(
3052             Int52Overflow, JSValueRegs(), 0,
3053             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3054         int52Result(result.gpr(), node);
3055         return;
3056     }
3057 #endif // USE(JSVALUE64)
3058
3059     case DoubleRepUse: {
3060         SpeculateDoubleOperand op1(this, node->child1());
3061         SpeculateDoubleOperand op2(this, node->child2());
3062         FPRTemporary result(this, op1);
3063
3064         FPRReg reg1 = op1.fpr();
3065         FPRReg reg2 = op2.fpr();
3066         m_jit.subDouble(reg1, reg2, result.fpr());
3067
3068         doubleResult(result.fpr(), node);
3069         return;
3070     }
3071         
3072     default:
3073         RELEASE_ASSERT_NOT_REACHED();
3074         return;
3075     }
3076 }
3077
3078 void SpeculativeJIT::compileArithNegate(Node* node)
3079 {
3080     switch (node->child1().useKind()) {
3081     case Int32Use: {
3082         SpeculateInt32Operand op1(this, node->child1());
3083         GPRTemporary result(this);
3084
3085         m_jit.move(op1.gpr(), result.gpr());
3086
3087         // Note: there is no arith mode in which the result is not used as a number
3088         // but someone still cares about negative zero.
3089         
3090         if (!shouldCheckOverflow(node->arithMode()))
3091             m_jit.neg32(result.gpr());
3092         else if (!shouldCheckNegativeZero(node->arithMode()))
3093             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3094         else {
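                 // result & 0x7fffffff is zero only for 0 and INT_MIN: negating 0 would
                 // produce negative zero and negating INT_MIN would overflow, so this one
                 // test covers both failure modes before the neg32 below.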
3095             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3096             m_jit.neg32(result.gpr());
3097         }
3098
3099         int32Result(result.gpr(), node);
3100         return;
3101     }
3102
3103 #if USE(JSVALUE64)
3104     case Int52RepUse: {
3105         ASSERT(shouldCheckOverflow(node->arithMode()));
3106         
3107         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3108             SpeculateWhicheverInt52Operand op1(this, node->child1());
3109             GPRTemporary result(this);
3110             GPRReg op1GPR = op1.gpr();
3111             GPRReg resultGPR = result.gpr();
3112             m_jit.move(op1GPR, resultGPR);
3113             m_jit.neg64(resultGPR);
3114             if (shouldCheckNegativeZero(node->arithMode())) {
3115                 speculationCheck(
3116                     NegativeZero, JSValueRegs(), 0,
3117                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3118             }
3119             int52Result(resultGPR, node, op1.format());
3120             return;
3121         }
3122         
3123         SpeculateInt52Operand op1(this, node->child1());
3124         GPRTemporary result(this);
3125         GPRReg op1GPR = op1.gpr();
3126         GPRReg resultGPR = result.gpr();
3127         m_jit.move(op1GPR, resultGPR);
3128         speculationCheck(
3129             Int52Overflow, JSValueRegs(), 0,
3130             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3131         if (shouldCheckNegativeZero(node->arithMode())) {
3132             speculationCheck(
3133                 NegativeZero, JSValueRegs(), 0,
3134                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3135         }
3136         int52Result(resultGPR, node);
3137         return;
3138     }
3139 #endif // USE(JSVALUE64)
3140         
3141     case DoubleRepUse: {
3142         SpeculateDoubleOperand op1(this, node->child1());
3143         FPRTemporary result(this);
3144         
3145         m_jit.negateDouble(op1.fpr(), result.fpr());
3146         
3147         doubleResult(result.fpr(), node);
3148         return;
3149     }
3150         
3151     default:
3152         RELEASE_ASSERT_NOT_REACHED();
3153         return;
3154     }
3155 }
3156 void SpeculativeJIT::compileArithMul(Node* node)
3157 {
3158     switch (node->binaryUseKind()) {
3159     case Int32Use: {
3160         SpeculateInt32Operand op1(this, node->child1());
3161         SpeculateInt32Operand op2(this, node->child2());
3162         GPRTemporary result(this);
3163
3164         GPRReg reg1 = op1.gpr();
3165         GPRReg reg2 = op2.gpr();
3166
3167         // We can perform truncated multiplications if we get to this point, because if the
3168         // fixup phase could not prove that it would be safe, it would have turned us into
3169         // a double multiplication.
3170         if (!shouldCheckOverflow(node->arithMode())) {
3171             m_jit.move(reg1, result.gpr());
3172             m_jit.mul32(reg2, result.gpr());
3173         } else {
3174             speculationCheck(
3175                 Overflow, JSValueRegs(), 0,
3176                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3177         }
3178             
3179         // Check for negative zero, if the users of this node care about such things.
3180         if (shouldCheckNegativeZero(node->arithMode())) {
3181             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3182             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3183             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3184             resultNonZero.link(&m_jit);
3185         }
3186
3187         int32Result(result.gpr(), node);
3188         return;
3189     }
3190     
3191 #if USE(JSVALUE64)   
3192     case Int52RepUse: {
3193         ASSERT(shouldCheckOverflow(node->arithMode()));
3194         
3195         // This is super clever. We want to do an int52 multiplication and check the
3196         // int52 overflow bit. There is no direct hardware support for this, but we do
3197         // have the ability to do an int64 multiplication and check the int64 overflow
3198         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3199         // registers, with the high 12 bits being sign-extended. We can do:
3200         //
3201         //     (a * (b << 12))
3202         //
3203         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3204         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3205         // multiplication overflows is identical to whether the 'a * b' 52-bit
3206         // multiplication overflows.
3207         //
3208         // In our nomenclature, this is:
3209         //
3210         //     strictInt52(a) * int52(b) => int52
3211         //
3212         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3213         // bits.
3214         //
3215         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3216         // we just do whatever is more convenient for op1 and have op2 do the
3217         // opposite. This ensures that we do at most one shift.
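             //
             // For example, with a = 3 and b = 2^49, the 64-bit product a * (b << 12) = 3 * 2^61
             // fits in an int64 and a * b = 3 * 2^49 fits in an int52. With b = 2^50 instead,
             // a * b no longer fits in an int52, and a * (b << 12) = 3 * 2^62 correspondingly
             // trips the int64 overflow flag.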
3218
3219         SpeculateWhicheverInt52Operand op1(this, node->child1());
3220         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3221         GPRTemporary result(this);
3222         
3223         GPRReg op1GPR = op1.gpr();
3224         GPRReg op2GPR = op2.gpr();
3225         GPRReg resultGPR = result.gpr();
3226         
3227         m_jit.move(op1GPR, resultGPR);
3228         speculationCheck(
3229             Int52Overflow, JSValueRegs(), 0,
3230             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3231         
3232         if (shouldCheckNegativeZero(node->arithMode())) {
3233             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3234                 MacroAssembler::NonZero, resultGPR);
3235             speculationCheck(
3236                 NegativeZero, JSValueRegs(), 0,
3237                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3238             speculationCheck(
3239                 NegativeZero, JSValueRegs(), 0,
3240                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3241             resultNonZero.link(&m_jit);
3242         }
3243         
3244         int52Result(resultGPR, node);
3245         return;
3246     }
3247 #endif // USE(JSVALUE64)
3248         
3249     case DoubleRepUse: {
3250         SpeculateDoubleOperand op1(this, node->child1());
3251         SpeculateDoubleOperand op2(this, node->child2());
3252         FPRTemporary result(this, op1, op2);
3253         
3254         FPRReg reg1 = op1.fpr();
3255         FPRReg reg2 = op2.fpr();
3256         
3257         m_jit.mulDouble(reg1, reg2, result.fpr());
3258         
3259         doubleResult(result.fpr(), node);
3260         return;
3261     }
3262         
3263     default:
3264         RELEASE_ASSERT_NOT_REACHED();
3265         return;
3266     }
3267 }
3268
3269 void SpeculativeJIT::compileArithDiv(Node* node)
3270 {
3271     switch (node->binaryUseKind()) {
3272     case Int32Use: {
3273 #if CPU(X86) || CPU(X86_64)
3274         SpeculateInt32Operand op1(this, node->child1());
3275         SpeculateInt32Operand op2(this, node->child2());
3276         GPRTemporary eax(this, X86Registers::eax);
3277         GPRTemporary edx(this, X86Registers::edx);
3278         GPRReg op1GPR = op1.gpr();
3279         GPRReg op2GPR = op2.gpr();
3280     
3281         GPRReg op2TempGPR;
3282         GPRReg temp;
3283         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3284             op2TempGPR = allocate();
3285             temp = op2TempGPR;
3286         } else {
3287             op2TempGPR = InvalidGPRReg;
3288             if (op1GPR == X86Registers::eax)
3289                 temp = X86Registers::edx;
3290             else
3291                 temp = X86Registers::eax;
3292         }
3293     
3294         ASSERT(temp != op1GPR);
3295         ASSERT(temp != op2GPR);
3296     
3297         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3298     
3299         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
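             // temp now holds op2 + 1, and the unsigned comparison (op2 + 1) > 1 passes exactly
             // when op2 is neither 0 nor -1 (0 + 1 = 1 and -1 + 1 = 0 both fail it), i.e. when
             // idiv can neither divide by zero nor hit the INT_MIN / -1 overflow case.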
3300     
3301         JITCompiler::JumpList done;
3302         if (shouldCheckOverflow(node->arithMode())) {
3303             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3304             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3305         } else {
3306             // This is the case where we convert the result to an int after we're done, and we
3307             // already know that the denominator is either -1 or 0. So, if the denominator is
3308             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3309             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3310             // are happy to fall through to a normal division, since we're just dividing
3311             // something by negative 1.
3312         
3313             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3314             m_jit.move(TrustedImm32(0), eax.gpr());
3315             done.append(m_jit.jump());
3316         
3317             notZero.link(&m_jit);
3318             JITCompiler::Jump notNeg2ToThe31 =
3319                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3320             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3321             done.append(m_jit.jump());
3322         
3323             notNeg2ToThe31.link(&m_jit);
3324         }
3325     
3326         safeDenominator.link(&m_jit);
3327     
3328         // If the user cares about negative zero, then speculate that we're not about
3329         // to produce negative zero.
3330         if (shouldCheckNegativeZero(node->arithMode())) {
3331             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3332             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3333             numeratorNonZero.link(&m_jit);
3334         }
3335     
3336         if (op2TempGPR != InvalidGPRReg) {
3337             m_jit.move(op2GPR, op2TempGPR);
3338             op2GPR = op2TempGPR;
3339         }
3340             
3341         m_jit.move(op1GPR, eax.gpr());
3342         m_jit.assembler().cdq();
3343         m_jit.assembler().idivl_r(op2GPR);
3344             
3345         if (op2TempGPR != InvalidGPRReg)
3346             unlock(op2TempGPR);
3347
3348         // Check that there was no remainder. If there had been, then we'd be obligated to
3349         // produce a double result instead.
3350         if (shouldCheckOverflow(node->arithMode()))
3351             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3352         
3353         done.link(&m_jit);
3354         int32Result(eax.gpr(), node);
3355 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3356         SpeculateInt32Operand op1(this, node->child1());
3357         SpeculateInt32Operand op2(this, node->child2());
3358         GPRReg op1GPR = op1.gpr();
3359         GPRReg op2GPR = op2.gpr();
3360         GPRTemporary quotient(this);
3361         GPRTemporary multiplyAnswer(this);
3362
3363         // If the user cares about negative zero, then speculate that we're not about
3364         // to produce negative zero.
3365         if (shouldCheckNegativeZero(node->arithMode())) {
3366             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3367             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3368             numeratorNonZero.link(&m_jit);
3369         }
3370
3371         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3372
3373         // Check that there was no remainder. If there had been, then we'd be obligated to
3374         // produce a double result instead.
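             // (The check recomputes quotient * divisor and compares it against the dividend;
             // any difference means the division was inexact, and the multiply's own overflow
             // check catches the INT_MIN / -1 case.)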
3375         if (shouldCheckOverflow(node->arithMode())) {
3376             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3377             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3378         }
3379
3380         int32Result(quotient.gpr(), node);
3381 #else
3382         RELEASE_ASSERT_NOT_REACHED();
3383 #endif
3384         break;
3385     }
3386         
3387     case DoubleRepUse: {
3388         SpeculateDoubleOperand op1(this, node->child1());
3389         SpeculateDoubleOperand op2(this, node->child2());
3390         FPRTemporary result(this, op1);
3391         
3392         FPRReg reg1 = op1.fpr();
3393         FPRReg reg2 = op2.fpr();
3394         m_jit.divDouble(reg1, reg2, result.fpr());
3395         
3396         doubleResult(result.fpr(), node);
3397         break;
3398     }
3399         
3400     default:
3401         RELEASE_ASSERT_NOT_REACHED();
3402         break;
3403     }
3404 }
3405
3406 void SpeculativeJIT::compileArithMod(Node* node)
3407 {
3408     switch (node->binaryUseKind()) {
3409     case Int32Use: {
3410         // In the fast path, the dividend value could be the final result
3411         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3412         SpeculateStrictInt32Operand op1(this, node->child1());
3413         
3414         if (node->child2()->isInt32Constant()) {
3415             int32_t divisor = node->child2()->asInt32();
3416             if (divisor > 1 && hasOneBitSet(divisor)) {
3417                 unsigned logarithm = WTF::fastLog2(divisor);
3418                 GPRReg dividendGPR = op1.gpr();
3419                 GPRTemporary result(this);
3420                 GPRReg resultGPR = result.gpr();
3421
3422                 // This is what LLVM generates. It's pretty crazy. Here's my
3423                 // attempt at understanding it.
3424                 
3425                 // First, compute either divisor - 1, or 0, depending on whether
3426                 // the dividend is negative:
3427                 //
3428                 // If dividend < 0:  resultGPR = divisor - 1
3429                 // If dividend >= 0: resultGPR = 0
3430                 m_jit.move(dividendGPR, resultGPR);
3431                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3432                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3433                 
3434                 // Add in the dividend, so that:
3435                 //
3436                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3437                 // If dividend >= 0: resultGPR = dividend
3438                 m_jit.add32(dividendGPR, resultGPR);
3439                 
3440                 // Mask so as to only get the *high* bits. This rounds down
3441                 // (towards negative infinity) resultGPR to the nearest multiple
3442                 // of divisor, so that:
3443                 //
3444                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3445                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3446                 //
3447                 // Note that this can be simplified to:
3448                 //
3449                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3450                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3451                 //
3452                 // Note that if the dividend is negative, resultGPR will also be negative.
3453                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3454                 // zero, because of how things are conditionalized.
3455                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3456                 
3457                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3458                 //
3459                 // resultGPR = dividendGPR - resultGPR
3460                 m_jit.neg32(resultGPR);
3461                 m_jit.add32(dividendGPR, resultGPR);
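                     // For example, with divisor = 8 (logarithm = 3) and dividend = -13: the
                     // shifts produce divisor - 1 = 7, adding the dividend gives -6, masking
                     // with -8 rounds that down to -8 (= 8 * ceil(-13 / 8)), and -13 - (-8) = -5,
                     // the correct remainder. For dividend = 13 the shifts produce 0, the mask
                     // yields 8, and 13 - 8 = 5.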
3462                 
3463                 if (shouldCheckNegativeZero(node->arithMode())) {
3464                     // Check that we're not about to create negative zero.
3465                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3466                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3467                     numeratorPositive.link(&m_jit);
3468                 }
3469
3470                 int32Result(resultGPR, node);
3471                 return;
3472             }
3473         }
3474         
3475 #if CPU(X86) || CPU(X86_64)
3476         if (node->child2()->isInt32Constant()) {
3477             int32_t divisor = node->child2()->asInt32();
3478             if (divisor && divisor != -1) {
3479                 GPRReg op1Gpr = op1.gpr();
3480
3481                 GPRTemporary eax(this, X86Registers::eax);
3482                 GPRTemporary edx(this, X86Registers::edx);
3483                 GPRTemporary scratch(this);
3484                 GPRReg scratchGPR = scratch.gpr();
3485
3486                 GPRReg op1SaveGPR;
3487                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3488                     op1SaveGPR = allocate();
3489                     ASSERT(op1Gpr != op1SaveGPR);
3490                     m_jit.move(op1Gpr, op1SaveGPR);
3491                 } else
3492                     op1SaveGPR = op1Gpr;
3493                 ASSERT(op1SaveGPR != X86Registers::eax);
3494                 ASSERT(op1SaveGPR != X86Registers::edx);
3495
3496                 m_jit.move(op1Gpr, eax.gpr());
3497                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3498                 m_jit.assembler().cdq();
3499                 m_jit.assembler().idivl_r(scratchGPR);
3500                 if (shouldCheckNegativeZero(node->arithMode())) {
3501                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3502                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3503                     numeratorPositive.link(&m_jit);
3504                 }
3505             
3506                 if (op1SaveGPR != op1Gpr)
3507                     unlock(op1SaveGPR);
3508
3509                 int32Result(edx.gpr(), node);
3510                 return;
3511             }
3512         }
3513 #endif
3514
3515         SpeculateInt32Operand op2(this, node->child2());
3516 #if CPU(X86) || CPU(X86_64)
3517         GPRTemporary eax(this, X86Registers::eax);
3518         GPRTemporary edx(this, X86Registers::edx);
3519         GPRReg op1GPR = op1.gpr();
3520         GPRReg op2GPR = op2.gpr();
3521     
3522         GPRReg op2TempGPR;
3523         GPRReg temp;
3524         GPRReg op1SaveGPR;
3525     
3526         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3527             op2TempGPR = allocate();
3528             temp = op2TempGPR;
3529         } else {
3530             op2TempGPR = InvalidGPRReg;
3531             if (op1GPR == X86Registers::eax)
3532                 temp = X86Registers::edx;
3533             else
3534                 temp = X86Registers::eax;
3535         }
3536     
3537         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3538             op1SaveGPR = allocate();
3539             ASSERT(op1GPR != op1SaveGPR);
3540             m_jit.move(op1GPR, op1SaveGPR);
3541         } else
3542             op1SaveGPR = op1GPR;
3543     
3544         ASSERT(temp != op1GPR);
3545         ASSERT(temp != op2GPR);
3546         ASSERT(op1SaveGPR != X86Registers::eax);
3547         ASSERT(op1SaveGPR != X86Registers::edx);
3548     
3549         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3550     
3551         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3552     
3553         JITCompiler::JumpList done;
3554         
3555         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3556         // separate case for that. But it probably doesn't matter so much.
3557         if (shouldCheckOverflow(node->arithMode())) {
3558             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3559             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3560         } else {
3561             // This is the case where we convert the result to an int after we're done, and we
3562             // already know that the denominator is either -1 or 0. So, if the denominator is
3563             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3564             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3565             // happy to fall through to a normal division, since we're just dividing something
3566             // by negative 1.
3567         
3568             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3569             m_jit.move(TrustedImm32(0), edx.gpr());
3570             done.append(m_jit.jump());
3571         
3572             notZero.link(&m_jit);
3573             JITCompiler::Jump notNeg2ToThe31 =
3574                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3575             m_jit.move(TrustedImm32(0), edx.gpr());
3576             done.append(m_jit.jump());
3577         
3578             notNeg2ToThe31.link(&m_jit);
3579         }
3580         
3581         safeDenominator.link(&m_jit);
3582             
3583         if (op2TempGPR != InvalidGPRReg) {
3584             m_jit.move(op2GPR, op2TempGPR);
3585             op2GPR = op2TempGPR;
3586         }
3587             
3588         m_jit.move(op1GPR, eax.gpr());
3589         m_jit.assembler().cdq();
3590         m_jit.assembler().idivl_r(op2GPR);
3591             
3592         if (op2TempGPR != InvalidGPRReg)
3593             unlock(op2TempGPR);
3594
3595         // Check that we're not about to create negative zero.
3596         if (shouldCheckNegativeZero(node->arithMode())) {
3597             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3598             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3599             numeratorPositive.link(&m_jit);
3600         }
3601     
3602         if (op1SaveGPR != op1GPR)
3603             unlock(op1SaveGPR);
3604             
3605         done.link(&m_jit);
3606         int32Result(edx.gpr(), node);
3607
3608 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3609         GPRTemporary temp(this);
3610         GPRTemporary quotientThenRemainder(this);
3611         GPRTemporary multiplyAnswer(this);
3612         GPRReg dividendGPR = op1.gpr();
3613         GPRReg divisorGPR = op2.gpr();
3614         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3615         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3616
3617         JITCompiler::JumpList done;
3618     
3619         if (shouldCheckOverflow(node->arithMode()))
3620             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3621         else {
3622             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3623             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3624             done.append(m_jit.jump());
3625             denominatorNotZero.link(&m_jit);
3626         }
3627
3628         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3629         // FIXME: It seems like there are cases where we don't need this? What if we have
3630         // arithMode() == Arith::Unchecked?
3631         // https://bugs.webkit.org/show_bug.cgi?id=126444
3632         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3633 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3634         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3635 #else
3636         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3637 #endif
3638
3639         // If the user cares about negative zero, then speculate that we're not about
3640         // to produce negative zero.
3641         if (shouldCheckNegativeZero(node->arithMode())) {
3642             // Check that we're not about to create negative zero.
3643             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3644             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3645             numeratorPositive.link(&m_jit);
3646         }
3647
3648         done.link(&m_jit);
3649
3650         int32Result(quotientThenRemainderGPR, node);
3651 #else // not architecture that can do integer division
3652         RELEASE_ASSERT_NOT_REACHED();
3653 #endif
3654         return;
3655     }
3656         
3657     case DoubleRepUse: {
3658         SpeculateDoubleOperand op1(this, node->child1());
3659         SpeculateDoubleOperand op2(this, node->child2());
3660         
3661         FPRReg op1FPR = op1.fpr();
3662         FPRReg op2FPR = op2.fpr();
3663         
3664         flushRegisters();
3665         
3666         FPRResult result(this);
3667         
3668         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3669         
3670         doubleResult(result.fpr(), node);
3671         return;
3672     }
3673         
3674     default:
3675         RELEASE_ASSERT_NOT_REACHED();
3676         return;
3677     }
3678 }
3679
3680 void SpeculativeJIT::compileArithRound(Node* node)
3681 {
3682     ASSERT(node->child1().useKind() == DoubleRepUse);
3683
3684     SpeculateDoubleOperand value(this, node->child1());
3685     FPRReg valueFPR = value.fpr();
3686
3687     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3688         FPRTemporary oneHalf(this);
3689         GPRTemporary roundedResultAsInt32(this);
3690         FPRReg oneHalfFPR = oneHalf.fpr();
3691         GPRReg resultGPR = roundedResultAsInt32.gpr();
3692
3693         static const double halfConstant = 0.5;
3694         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3695         m_jit.addDouble(valueFPR, oneHalfFPR);
3696
3697         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3698         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3699         int32Result(resultGPR, node);
3700         return;
3701     }
3702
3703     flushRegisters();
3704     FPRResult roundedResultAsDouble(this);
3705     FPRReg resultFPR = roundedResultAsDouble.fpr();
3706     callOperation(jsRound, resultFPR, valueFPR);
3707     if (producesInteger(node->arithRoundingMode())) {
3708         GPRTemporary roundedResultAsInt32(this);
3709         FPRTemporary scratch(this);
3710         FPRReg scratchFPR = scratch.fpr();
3711         GPRReg resultGPR = roundedResultAsInt32.gpr();
3712         JITCompiler::JumpList failureCases;
3713         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3714         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3715
3716         int32Result(resultGPR, node);
3717     } else
3718         doubleResult(resultFPR, node);
3719 }
3720
3721 void SpeculativeJIT::compileArithSqrt(Node* node)
3722 {
3723     SpeculateDoubleOperand op1(this, node->child1());
3724     FPRReg op1FPR = op1.fpr();
3725
3726     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3727         flushRegisters();
3728         FPRResult result(this);
3729         callOperation(sqrt, result.fpr(), op1FPR);
3730         doubleResult(result.fpr(), node);
3731     } else {
3732         FPRTemporary result(this, op1);
3733         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3734         doubleResult(result.fpr(), node);
3735     }
3736 }
3737
3738 // For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
3739 // Every register is clobbered by this helper.
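     // The loop is standard square-and-multiply: result starts at 1.0, the base is squared on
     // every iteration, and it is folded into the result whenever the low bit of the remaining
     // exponent is set. For example, y = 5 (binary 101) multiplies in x and then x^4, yielding
     // x^5 after three iterations.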
3740 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3741 {
3742     MacroAssembler::JumpList skipFastPath;
3743     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3744     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3745
3746     static const double oneConstant = 1.0;
3747     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3748
3749     MacroAssembler::Label startLoop(assembler.label());
3750     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3751     assembler.mulDouble(xOperand, result);
3752     exponentIsEven.link(&assembler);
3753     assembler.mulDouble(xOperand, xOperand);
3754     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3755     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3756
3757     MacroAssembler::Jump skipSlowPath = assembler.jump();
3758     skipFastPath.link(&assembler);
3759
3760     return skipSlowPath;
3761 }
3762
3763 void SpeculativeJIT::compileArithPow(Node* node)
3764 {
3765     if (node->child2().useKind() == Int32Use) {
3766         SpeculateDoubleOperand xOperand(this, node->child1());
3767         SpeculateInt32Operand yOperand(this, node->child2());
3768         FPRReg xOperandfpr = xOperand.fpr();
3769         GPRReg yOperandGpr = yOperand.gpr();
3770         FPRTemporary yOperandfpr(this);
3771
3772         flushRegisters();
3773
3774         FPRResult result(this);
3775         FPRReg resultFpr = result.fpr();
3776
3777         FPRTemporary xOperandCopy(this);
3778         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3779         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3780
3781         GPRTemporary counter(this);
3782         GPRReg counterGpr = counter.gpr();
3783         m_jit.move(yOperandGpr, counterGpr);
3784
3785         MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
3786         m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
3787         callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());
3788
3789         skipFallback.link(&m_jit);
3790         doubleResult(resultFpr, node);
3791         return;
3792     }
3793
3794     SpeculateDoubleOperand xOperand(this, node->child1());
3795     SpeculateDoubleOperand yOperand(this, node->child2());
3796     FPRReg xOperandfpr = xOperand.fpr();