Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSArrowFunction.h"
42 #include "JSCInlines.h"
43 #include "JSEnvironmentRecord.h"
44 #include "JSLexicalEnvironment.h"
45 #include "LinkBuffer.h"
46 #include "ScopedArguments.h"
47 #include "ScratchRegisterAllocator.h"
48 #include "WriteBarrierBuffer.h"
49 #include <wtf/MathExtras.h>
50
51 namespace JSC { namespace DFG {
52
53 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
54     : m_compileOkay(true)
55     , m_jit(jit)
56     , m_currentNode(0)
57     , m_lastGeneratedNode(LastNodeType)
58     , m_indexInBlock(0)
59     , m_generationInfo(m_jit.graph().frameRegisterCount())
60     , m_state(m_jit.graph())
61     , m_interpreter(m_jit.graph(), m_state)
62     , m_stream(&jit.jitCode()->variableEventStream)
63     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
64     , m_isCheckingArgumentTypes(false)
65 {
66 }
67
68 SpeculativeJIT::~SpeculativeJIT()
69 {
70 }
71
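// Fast-path inline allocation of a JSArray together with its butterfly storage.
// Roughly: emitAllocateBasicStorage carves out vectorLength JSValue slots plus an
// IndexingHeader and leaves storageGPR pointing at the end of that allocation, so
// subtracting the vector size repositions it at element 0 (the butterfly pointer).
// The array cell is then allocated pointing at that storage, the public and vector
// lengths are written into the indexing header, and for double arrays the unused tail
// of the vector is pre-filled with PNaN (the pattern used to mark holes) so stale
// memory is never read back as a valid double. Any inline-allocation failure takes
// slowCases, which the CallArrayAllocatorSlowPathGenerator below turns into a call to
// operationNewArrayWithSize.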
72 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
73 {
74     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
75     
76     GPRTemporary scratch(this);
77     GPRTemporary scratch2(this);
78     GPRReg scratchGPR = scratch.gpr();
79     GPRReg scratch2GPR = scratch2.gpr();
80     
81     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
82     
83     JITCompiler::JumpList slowCases;
84     
85     slowCases.append(
86         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
87     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
88     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
89     
90     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
91     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
92     
93     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
94 #if USE(JSVALUE64)
95         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
96         for (unsigned i = numElements; i < vectorLength; ++i)
97             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
98 #else
99         EncodedValueDescriptor value;
100         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
101         for (unsigned i = numElements; i < vectorLength; ++i) {
102             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
103             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
104         }
105 #endif
106     }
107     
108     // I want a slow path that also loads out the storage pointer, and that's
109     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
110     // of work for a very small piece of functionality. :-/
111     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
112         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
113         structure, numElements));
114 }
115
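// Loads the argument count for the frame identified by inlineCallFrame into lengthGPR.
// For a non-varargs inline frame the count is a compile-time constant; otherwise it is
// loaded from the frame's ArgumentCount slot. Unless includeThis is set, one is
// subtracted so the result counts only the actual arguments.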
116 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
117 {
118     if (inlineCallFrame && !inlineCallFrame->isVarargs())
119         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
120     else {
121         VirtualRegister argumentCountRegister;
122         if (!inlineCallFrame)
123             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
124         else
125             argumentCountRegister = inlineCallFrame->argumentCountRegister;
126         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
127         if (!includeThis)
128             m_jit.sub32(TrustedImm32(1), lengthGPR);
129     }
130 }
131
132 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
133 {
134     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
135 }
136
137 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
138 {
139     if (origin.inlineCallFrame) {
140         if (origin.inlineCallFrame->isClosureCall) {
141             m_jit.loadPtr(
142                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
143                 calleeGPR);
144         } else {
145             m_jit.move(
146                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
147                 calleeGPR);
148         }
149     } else
150         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
151 }
152
153 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
154 {
155     m_jit.addPtr(
156         TrustedImm32(
157             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
158         GPRInfo::callFrameRegister, startGPR);
159 }
160
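// OSR exit fuzzing support: bumps the global count of fuzz checks and, depending on
// Options::fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter, returns a jump that forces the
// enclosing speculation check to fail either exactly at the Nth check or at every check
// from the Nth onwards. This is a testing aid for exercising OSR exit paths.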
161 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
162 {
163     if (!doOSRExitFuzzing())
164         return MacroAssembler::Jump();
165     
166     MacroAssembler::Jump result;
167     
168     m_jit.pushToSave(GPRInfo::regT0);
169     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
170     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
171     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
172     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
173     unsigned at = Options::fireOSRExitFuzzAt();
174     if (at || atOrAfter) {
175         unsigned threshold;
176         MacroAssembler::RelationalCondition condition;
177         if (atOrAfter) {
178             threshold = atOrAfter;
179             condition = MacroAssembler::Below;
180         } else {
181             threshold = at;
182             condition = MacroAssembler::NotEqual;
183         }
184         MacroAssembler::Jump ok = m_jit.branch32(
185             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
186         m_jit.popToRestore(GPRInfo::regT0);
187         result = m_jit.jump();
188         ok.link(&m_jit);
189     }
190     m_jit.popToRestore(GPRInfo::regT0);
191     
192     return result;
193 }
194
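// Each speculationCheck records the failure jump(s) via appendExitInfo and appends an
// OSRExit keyed to the current size of the variable event stream, which the exit
// machinery can later use to reconstruct the bytecode state at this point. If OSR exit
// fuzzing is active, the fuzz-induced jump is folded into the same exit.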
195 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
196 {
197     if (!m_compileOkay)
198         return;
199     ASSERT(m_isCheckingArgumentTypes || m_canExit);
200     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
201     if (fuzzJump.isSet()) {
202         JITCompiler::JumpList jumpsToFail;
203         jumpsToFail.append(fuzzJump);
204         jumpsToFail.append(jumpToFail);
205         m_jit.appendExitInfo(jumpsToFail);
206     } else
207         m_jit.appendExitInfo(jumpToFail);
208     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
209 }
210
211 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
212 {
213     if (!m_compileOkay)
214         return;
215     ASSERT(m_isCheckingArgumentTypes || m_canExit);
216     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
217     if (fuzzJump.isSet()) {
218         JITCompiler::JumpList myJumpsToFail;
219         myJumpsToFail.append(jumpsToFail);
220         myJumpsToFail.append(fuzzJump);
221         m_jit.appendExitInfo(myJumpsToFail);
222     } else
223         m_jit.appendExitInfo(jumpsToFail);
224     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
225 }
226
227 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
228 {
229     if (!m_compileOkay)
230         return OSRExitJumpPlaceholder();
231     ASSERT(m_isCheckingArgumentTypes || m_canExit);
232     unsigned index = m_jit.jitCode()->osrExit.size();
233     m_jit.appendExitInfo();
234     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
235     return OSRExitJumpPlaceholder(index);
236 }
237
238 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
239 {
240     ASSERT(m_isCheckingArgumentTypes || m_canExit);
241     return speculationCheck(kind, jsValueSource, nodeUse.node());
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
245 {
246     ASSERT(m_isCheckingArgumentTypes || m_canExit);
247     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
248 }
249
250 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
251 {
252     ASSERT(m_isCheckingArgumentTypes || m_canExit);
253     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
254 }
255
256 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
257 {
258     if (!m_compileOkay)
259         return;
260     ASSERT(m_isCheckingArgumentTypes || m_canExit);
261     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
262     m_jit.appendExitInfo(jumpToFail);
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
264 }
265
266 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
267 {
268     ASSERT(m_isCheckingArgumentTypes || m_canExit);
269     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
270 }
271
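// An invalidation point emits no code of its own; it records a watchpoint label and an
// UncountableInvalidation OSR exit so that, if this code block is later invalidated,
// a jump to that exit can be patched in at the recorded label.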
272 void SpeculativeJIT::emitInvalidationPoint(Node* node)
273 {
274     if (!m_compileOkay)
275         return;
276     ASSERT(m_canExit);
277     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
278     m_jit.jitCode()->appendOSRExit(OSRExit(
279         UncountableInvalidation, JSValueSource(),
280         m_jit.graph().methodOfGettingAValueProfileFor(node),
281         this, m_stream->size()));
282     info.m_replacementSource = m_jit.watchpointLabel();
283     ASSERT(info.m_replacementSource.isSet());
284     noResult(node);
285 }
286
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
288 {
289     ASSERT(m_isCheckingArgumentTypes || m_canExit);
290     if (!m_compileOkay)
291         return;
292     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
293     m_compileOkay = false;
294     if (verboseCompilationEnabled())
295         dataLog("Bailing compilation.\n");
296 }
297
298 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
299 {
300     ASSERT(m_isCheckingArgumentTypes || m_canExit);
301     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
302 }
303
304 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
305 {
306     ASSERT(needsTypeCheck(edge, typesPassedThrough));
307     m_interpreter.filter(edge, typesPassedThrough);
308     speculationCheck(BadType, source, edge.node(), jumpToFail);
309 }
310
311 RegisterSet SpeculativeJIT::usedRegisters()
312 {
313     RegisterSet result;
314     
315     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
316         GPRReg gpr = GPRInfo::toRegister(i);
317         if (m_gprs.isInUse(gpr))
318             result.set(gpr);
319     }
320     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
321         FPRReg fpr = FPRInfo::toRegister(i);
322         if (m_fprs.isInUse(fpr))
323             result.set(fpr);
324     }
325     
326     result.merge(RegisterSet::specialRegisters());
327     
328     return result;
329 }
330
331 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
332 {
333     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
334 }
335
336 void SpeculativeJIT::runSlowPathGenerators()
337 {
338     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
339         m_slowPathGenerators[i]->generate(this);
340 }
341
342 // On Windows we need to wrap fmod; on other platforms we can call it directly.
343 // On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
344 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
345 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
346 {
347     return fmod(x, y);
348 }
349 #else
350 #define fmodAsDFGOperation fmod
351 #endif
352
353 void SpeculativeJIT::clearGenerationInfo()
354 {
355     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
356         m_generationInfo[i] = GenerationInfo();
357     m_gprs = RegisterBank<GPRInfo>();
358     m_fprs = RegisterBank<FPRInfo>();
359 }
360
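// Silent spill/fill plans describe how to preserve a live register across an operation
// (typically a call) without updating the recorded GenerationInfo, hence "silent".
// The spill action stores the value to its virtual register slot only if a spill is
// actually needed; the fill action says how to get the value back afterwards, either by
// reloading from the stack or by rematerializing a constant.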
361 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
362 {
363     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
364     Node* node = info.node();
365     DataFormat registerFormat = info.registerFormat();
366     ASSERT(registerFormat != DataFormatNone);
367     ASSERT(registerFormat != DataFormatDouble);
368         
369     SilentSpillAction spillAction;
370     SilentFillAction fillAction;
371         
372     if (!info.needsSpill())
373         spillAction = DoNothingForSpill;
374     else {
375 #if USE(JSVALUE64)
376         ASSERT(info.gpr() == source);
377         if (registerFormat == DataFormatInt32)
378             spillAction = Store32Payload;
379         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
380             spillAction = StorePtr;
381         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
382             spillAction = Store64;
383         else {
384             ASSERT(registerFormat & DataFormatJS);
385             spillAction = Store64;
386         }
387 #elif USE(JSVALUE32_64)
388         if (registerFormat & DataFormatJS) {
389             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
390             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
391         } else {
392             ASSERT(info.gpr() == source);
393             spillAction = Store32Payload;
394         }
395 #endif
396     }
397         
398     if (registerFormat == DataFormatInt32) {
399         ASSERT(info.gpr() == source);
400         ASSERT(isJSInt32(info.registerFormat()));
401         if (node->hasConstant()) {
402             ASSERT(node->isInt32Constant());
403             fillAction = SetInt32Constant;
404         } else
405             fillAction = Load32Payload;
406     } else if (registerFormat == DataFormatBoolean) {
407 #if USE(JSVALUE64)
408         RELEASE_ASSERT_NOT_REACHED();
409 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
410         fillAction = DoNothingForFill;
411 #endif
412 #elif USE(JSVALUE32_64)
413         ASSERT(info.gpr() == source);
414         if (node->hasConstant()) {
415             ASSERT(node->isBooleanConstant());
416             fillAction = SetBooleanConstant;
417         } else
418             fillAction = Load32Payload;
419 #endif
420     } else if (registerFormat == DataFormatCell) {
421         ASSERT(info.gpr() == source);
422         if (node->hasConstant()) {
423             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
424             node->asCell(); // To get the assertion.
425             fillAction = SetCellConstant;
426         } else {
427 #if USE(JSVALUE64)
428             fillAction = LoadPtr;
429 #else
430             fillAction = Load32Payload;
431 #endif
432         }
433     } else if (registerFormat == DataFormatStorage) {
434         ASSERT(info.gpr() == source);
435         fillAction = LoadPtr;
436     } else if (registerFormat == DataFormatInt52) {
437         if (node->hasConstant())
438             fillAction = SetInt52Constant;
439         else if (info.spillFormat() == DataFormatInt52)
440             fillAction = Load64;
441         else if (info.spillFormat() == DataFormatStrictInt52)
442             fillAction = Load64ShiftInt52Left;
443         else if (info.spillFormat() == DataFormatNone)
444             fillAction = Load64;
445         else {
446             RELEASE_ASSERT_NOT_REACHED();
447 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
448             fillAction = Load64; // Make GCC happy.
449 #endif
450         }
451     } else if (registerFormat == DataFormatStrictInt52) {
452         if (node->hasConstant())
453             fillAction = SetStrictInt52Constant;
454         else if (info.spillFormat() == DataFormatInt52)
455             fillAction = Load64ShiftInt52Right;
456         else if (info.spillFormat() == DataFormatStrictInt52)
457             fillAction = Load64;
458         else if (info.spillFormat() == DataFormatNone)
459             fillAction = Load64;
460         else {
461             RELEASE_ASSERT_NOT_REACHED();
462 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
463             fillAction = Load64; // Make GCC happy.
464 #endif
465         }
466     } else {
467         ASSERT(registerFormat & DataFormatJS);
468 #if USE(JSVALUE64)
469         ASSERT(info.gpr() == source);
470         if (node->hasConstant()) {
471             if (node->isCellConstant())
472                 fillAction = SetTrustedJSConstant;
473             else
474                 fillAction = SetJSConstant;
475         } else if (info.spillFormat() == DataFormatInt32) {
476             ASSERT(registerFormat == DataFormatJSInt32);
477             fillAction = Load32PayloadBoxInt;
478         } else
479             fillAction = Load64;
480 #else
481         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
482         if (node->hasConstant())
483             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
484         else if (info.payloadGPR() == source)
485             fillAction = Load32Payload;
486         else { // Fill the Tag
487             switch (info.spillFormat()) {
488             case DataFormatInt32:
489                 ASSERT(registerFormat == DataFormatJSInt32);
490                 fillAction = SetInt32Tag;
491                 break;
492             case DataFormatCell:
493                 ASSERT(registerFormat == DataFormatJSCell);
494                 fillAction = SetCellTag;
495                 break;
496             case DataFormatBoolean:
497                 ASSERT(registerFormat == DataFormatJSBoolean);
498                 fillAction = SetBooleanTag;
499                 break;
500             default:
501                 fillAction = Load32Tag;
502                 break;
503             }
504         }
505 #endif
506     }
507         
508     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
509 }
510     
511 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
512 {
513     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
514     Node* node = info.node();
515     ASSERT(info.registerFormat() == DataFormatDouble);
516
517     SilentSpillAction spillAction;
518     SilentFillAction fillAction;
519         
520     if (!info.needsSpill())
521         spillAction = DoNothingForSpill;
522     else {
523         ASSERT(!node->hasConstant());
524         ASSERT(info.spillFormat() == DataFormatNone);
525         ASSERT(info.fpr() == source);
526         spillAction = StoreDouble;
527     }
528         
529 #if USE(JSVALUE64)
530     if (node->hasConstant()) {
531         node->asNumber(); // To get the assertion.
532         fillAction = SetDoubleConstant;
533     } else {
534         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
535         fillAction = LoadDouble;
536     }
537 #elif USE(JSVALUE32_64)
538     ASSERT(info.registerFormat() == DataFormatDouble);
539     if (node->hasConstant()) {
540         node->asNumber(); // To get the assertion.
541         fillAction = SetDoubleConstant;
542     } else
543         fillAction = LoadDouble;
544 #endif
545
546     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
547 }
548     
549 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
550 {
551     switch (plan.spillAction()) {
552     case DoNothingForSpill:
553         break;
554     case Store32Tag:
555         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
556         break;
557     case Store32Payload:
558         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
559         break;
560     case StorePtr:
561         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
562         break;
563 #if USE(JSVALUE64)
564     case Store64:
565         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
566         break;
567 #endif
568     case StoreDouble:
569         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
570         break;
571     default:
572         RELEASE_ASSERT_NOT_REACHED();
573     }
574 }
575     
576 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
577 {
578 #if USE(JSVALUE32_64)
579     UNUSED_PARAM(canTrample);
580 #endif
581     switch (plan.fillAction()) {
582     case DoNothingForFill:
583         break;
584     case SetInt32Constant:
585         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
586         break;
587 #if USE(JSVALUE64)
588     case SetInt52Constant:
589         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
590         break;
591     case SetStrictInt52Constant:
592         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
593         break;
594 #endif // USE(JSVALUE64)
595     case SetBooleanConstant:
596         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
597         break;
598     case SetCellConstant:
599         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
600         break;
601 #if USE(JSVALUE64)
602     case SetTrustedJSConstant:
603         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
604         break;
605     case SetJSConstant:
606         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
607         break;
608     case SetDoubleConstant:
609         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
610         m_jit.move64ToDouble(canTrample, plan.fpr());
611         break;
612     case Load32PayloadBoxInt:
613         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
614         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
615         break;
616     case Load32PayloadConvertToInt52:
617         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
618         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
619         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
620         break;
621     case Load32PayloadSignExtend:
622         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
623         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
624         break;
625 #else
626     case SetJSConstantTag:
627         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
628         break;
629     case SetJSConstantPayload:
630         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
631         break;
632     case SetInt32Tag:
633         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
634         break;
635     case SetCellTag:
636         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
637         break;
638     case SetBooleanTag:
639         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
640         break;
641     case SetDoubleConstant:
642         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
643         break;
644 #endif
645     case Load32Tag:
646         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
647         break;
648     case Load32Payload:
649         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
650         break;
651     case LoadPtr:
652         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
653         break;
654 #if USE(JSVALUE64)
655     case Load64:
656         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
657         break;
658     case Load64ShiftInt52Right:
659         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
660         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
661         break;
662     case Load64ShiftInt52Left:
663         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
664         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
665         break;
666 #endif
667     case LoadDouble:
668         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
669         break;
670     default:
671         RELEASE_ASSERT_NOT_REACHED();
672     }
673 }
674     
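// Checks whether the indexing type in tempGPR matches the shape expected by arrayMode,
// returning the jump(s) taken on mismatch. Depending on the array class we either
// require the IsArray bit, require its absence, or ignore it, and compare the masked
// bits against the expected shape. For ArrayStorage/SlowPutArrayStorage the JumpList
// variant below uses a subtract-and-unsigned-compare trick so a single branch accepts
// the whole [ArrayStorageShape, SlowPutArrayStorageShape] range.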
675 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
676 {
677     switch (arrayMode.arrayClass()) {
678     case Array::OriginalArray: {
679         CRASH();
680 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
681         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
682         return result;
683 #endif
684     }
685         
686     case Array::Array:
687         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
688         return m_jit.branch32(
689             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
690         
691     case Array::NonArray:
692     case Array::OriginalNonArray:
693         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
694         return m_jit.branch32(
695             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
696         
697     case Array::PossiblyArray:
698         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
699         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
700     }
701     
702     RELEASE_ASSERT_NOT_REACHED();
703     return JITCompiler::Jump();
704 }
705
706 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
707 {
708     JITCompiler::JumpList result;
709     
710     switch (arrayMode.type()) {
711     case Array::Int32:
712         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
713
714     case Array::Double:
715         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
716
717     case Array::Contiguous:
718         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
719
720     case Array::Undecided:
721         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
722
723     case Array::ArrayStorage:
724     case Array::SlowPutArrayStorage: {
725         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
726         
727         if (arrayMode.isJSArray()) {
728             if (arrayMode.isSlowPut()) {
729                 result.append(
730                     m_jit.branchTest32(
731                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
732                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
733                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
734                 result.append(
735                     m_jit.branch32(
736                         MacroAssembler::Above, tempGPR,
737                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
738                 break;
739             }
740             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741             result.append(
742                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
743             break;
744         }
745         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
746         if (arrayMode.isSlowPut()) {
747             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
748             result.append(
749                 m_jit.branch32(
750                     MacroAssembler::Above, tempGPR,
751                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
752             break;
753         }
754         result.append(
755             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
756         break;
757     }
758     default:
759         CRASH();
760         break;
761     }
762     
763     return result;
764 }
765
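// CheckArray: verify, with a speculation check, that the base cell really has the
// indexing type (or cell type, for typed arrays and arguments objects) that the node's
// ArrayMode was planned for. If the abstract state already proves the check, no code is
// emitted at all.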
766 void SpeculativeJIT::checkArray(Node* node)
767 {
768     ASSERT(node->arrayMode().isSpecific());
769     ASSERT(!node->arrayMode().doesConversion());
770     
771     SpeculateCellOperand base(this, node->child1());
772     GPRReg baseReg = base.gpr();
773     
774     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
775         noResult(m_currentNode);
776         return;
777     }
778     
779     const ClassInfo* expectedClassInfo = 0;
780     
781     switch (node->arrayMode().type()) {
782     case Array::String:
783         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
784         break;
785     case Array::Int32:
786     case Array::Double:
787     case Array::Contiguous:
788     case Array::Undecided:
789     case Array::ArrayStorage:
790     case Array::SlowPutArrayStorage: {
791         GPRTemporary temp(this);
792         GPRReg tempGPR = temp.gpr();
793         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
794         speculationCheck(
795             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
796             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
797         
798         noResult(m_currentNode);
799         return;
800     }
801     case Array::DirectArguments:
802         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
803         noResult(m_currentNode);
804         return;
805     case Array::ScopedArguments:
806         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
807         noResult(m_currentNode);
808         return;
809     default:
810         speculateCellTypeWithoutTypeFiltering(
811             node->child1(), baseReg,
812             typeForTypedArrayType(node->arrayMode().typedArrayType()));
813         noResult(m_currentNode);
814         return;
815     }
816     
817     RELEASE_ASSERT(expectedClassInfo);
818     
819     GPRTemporary temp(this);
820     GPRTemporary temp2(this);
821     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
822     speculationCheck(
823         BadType, JSValueSource::unboxedCell(baseReg), node,
824         m_jit.branchPtr(
825             MacroAssembler::NotEqual,
826             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
827             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
828     
829     noResult(m_currentNode);
830 }
831
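// Arrayify differs from CheckArray in that a mismatch does not cause an OSR exit;
// instead the slow path (ArrayifySlowPathGenerator) converts the object's storage to
// the desired indexing type. ArrayifyToStructure checks the full structure rather than
// just the indexing-type byte.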
832 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
833 {
834     ASSERT(node->arrayMode().doesConversion());
835     
836     GPRTemporary temp(this);
837     GPRTemporary structure;
838     GPRReg tempGPR = temp.gpr();
839     GPRReg structureGPR = InvalidGPRReg;
840     
841     if (node->op() != ArrayifyToStructure) {
842         GPRTemporary realStructure(this);
843         structure.adopt(realStructure);
844         structureGPR = structure.gpr();
845     }
846         
847     // We can skip all that comes next if we already have array storage.
848     MacroAssembler::JumpList slowPath;
849     
850     if (node->op() == ArrayifyToStructure) {
851         slowPath.append(m_jit.branchWeakStructure(
852             JITCompiler::NotEqual,
853             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
854             node->structure()));
855     } else {
856         m_jit.load8(
857             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
858         
859         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
860     }
861     
862     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
863         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
864     
865     noResult(m_currentNode);
866 }
867
868 void SpeculativeJIT::arrayify(Node* node)
869 {
870     ASSERT(node->arrayMode().isSpecific());
871     
872     SpeculateCellOperand base(this, node->child1());
873     
874     if (!node->child2()) {
875         arrayify(node, base.gpr(), InvalidGPRReg);
876         return;
877     }
878     
879     SpeculateInt32Operand property(this, node->child2());
880     
881     arrayify(node, base.gpr(), property.gpr());
882 }
883
884 GPRReg SpeculativeJIT::fillStorage(Edge edge)
885 {
886     VirtualRegister virtualRegister = edge->virtualRegister();
887     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
888     
889     switch (info.registerFormat()) {
890     case DataFormatNone: {
891         if (info.spillFormat() == DataFormatStorage) {
892             GPRReg gpr = allocate();
893             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
894             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
895             info.fillStorage(*m_stream, gpr);
896             return gpr;
897         }
898         
899         // Must be a cell; fill it as a cell and then return the pointer.
900         return fillSpeculateCell(edge);
901     }
902         
903     case DataFormatStorage: {
904         GPRReg gpr = info.gpr();
905         m_gprs.lock(gpr);
906         return gpr;
907     }
908         
909     default:
910         return fillSpeculateCell(edge);
911     }
912 }
913
914 void SpeculativeJIT::useChildren(Node* node)
915 {
916     if (node->flags() & NodeHasVarArgs) {
917         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
918             if (!!m_jit.graph().m_varArgChildren[childIdx])
919                 use(m_jit.graph().m_varArgChildren[childIdx]);
920         }
921     } else {
922         Edge child1 = node->child1();
923         if (!child1) {
924             ASSERT(!node->child2() && !node->child3());
925             return;
926         }
927         use(child1);
928         
929         Edge child2 = node->child2();
930         if (!child2) {
931             ASSERT(!node->child3());
932             return;
933         }
934         use(child2);
935         
936         Edge child3 = node->child3();
937         if (!child3)
938             return;
939         use(child3);
940     }
941 }
942
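// The 'in' operator. When the property is a constant atomic string we emit a patchable
// jump plus a StructureStubInfo and register an InRecord; initially the jump always goes
// to the operationInOptimize slow path, which can presumably repatch the site with a
// faster inline check for the structures it observes. Otherwise we fall back to a plain
// call to operationGenericIn.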
943 void SpeculativeJIT::compileIn(Node* node)
944 {
945     SpeculateCellOperand base(this, node->child2());
946     GPRReg baseGPR = base.gpr();
947     
948     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
949         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
950             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
951             
952             GPRTemporary result(this);
953             GPRReg resultGPR = result.gpr();
954
955             use(node->child1());
956             
957             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
958             MacroAssembler::Label done = m_jit.label();
959             
960             // Since this block is executed only when string->tryGetValueImpl() returns an atomic StringImpl,
961             // we can safely cast it to const AtomicStringImpl*.
962             auto slowPath = slowPathCall(
963                 jump.m_jump, this, operationInOptimize,
964                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
965                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
966             
967             stubInfo->codeOrigin = node->origin.semantic;
968             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
969             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
970             stubInfo->patch.usedRegisters = usedRegisters();
971             stubInfo->patch.spillMode = NeedToSpill;
972
973             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
974             addSlowPathGenerator(WTF::move(slowPath));
975
976             base.use();
977
978             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
979             return;
980         }
981     }
982
983     JSValueOperand key(this, node->child1());
984     JSValueRegs regs = key.jsValueRegs();
985         
986     GPRFlushedCallResult result(this);
987     GPRReg resultGPR = result.gpr();
988         
989     base.use();
990     key.use();
991         
992     flushRegisters();
993     callOperation(
994         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
995         baseGPR, regs);
996     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
997 }
998
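// Peephole fusion: if the only use of this compare is the Branch node that immediately
// follows it, compile a fused compare-and-branch and advance the current node to that
// branch so the main loop does not compile it again. Returns true when the branch was
// consumed this way.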
999 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1000 {
1001     unsigned branchIndexInBlock = detectPeepHoleBranch();
1002     if (branchIndexInBlock != UINT_MAX) {
1003         Node* branchNode = m_block->at(branchIndexInBlock);
1004
1005         ASSERT(node->adjustedRefCount() == 1);
1006         
1007         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1008     
1009         m_indexInBlock = branchIndexInBlock;
1010         m_currentNode = branchNode;
1011         
1012         return true;
1013     }
1014     
1015     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1016     
1017     return false;
1018 }
1019
1020 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1021 {
1022     unsigned branchIndexInBlock = detectPeepHoleBranch();
1023     if (branchIndexInBlock != UINT_MAX) {
1024         Node* branchNode = m_block->at(branchIndexInBlock);
1025
1026         ASSERT(node->adjustedRefCount() == 1);
1027         
1028         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1029     
1030         m_indexInBlock = branchIndexInBlock;
1031         m_currentNode = branchNode;
1032         
1033         return true;
1034     }
1035     
1036     nonSpeculativeNonPeepholeStrictEq(node, invert);
1037     
1038     return false;
1039 }
1040
1041 static const char* dataFormatString(DataFormat format)
1042 {
1043     // These values correspond to the DataFormat enum.
1044     const char* strings[] = {
1045         "[  ]",
1046         "[ i]",
1047         "[ d]",
1048         "[ c]",
1049         "Err!",
1050         "Err!",
1051         "Err!",
1052         "Err!",
1053         "[J ]",
1054         "[Ji]",
1055         "[Jd]",
1056         "[Jc]",
1057         "Err!",
1058         "Err!",
1059         "Err!",
1060         "Err!",
1061     };
1062     return strings[format];
1063 }
1064
1065 void SpeculativeJIT::dump(const char* label)
1066 {
1067     if (label)
1068         dataLogF("<%s>\n", label);
1069
1070     dataLogF("  gprs:\n");
1071     m_gprs.dump();
1072     dataLogF("  fprs:\n");
1073     m_fprs.dump();
1074     dataLogF("  VirtualRegisters:\n");
1075     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1076         GenerationInfo& info = m_generationInfo[i];
1077         if (info.alive())
1078             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1079         else
1080             dataLogF("    % 3d:[__][__]", i);
1081         if (info.registerFormat() == DataFormatDouble)
1082             dataLogF(":fpr%d\n", info.fpr());
1083         else if (info.registerFormat() != DataFormatNone
1084 #if USE(JSVALUE32_64)
1085             && !(info.registerFormat() & DataFormatJS)
1086 #endif
1087             ) {
1088             ASSERT(info.gpr() != InvalidGPRReg);
1089             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1090         } else
1091             dataLogF("\n");
1092     }
1093     if (label)
1094         dataLogF("</%s>\n", label);
1095 }
1096
1097 GPRTemporary::GPRTemporary()
1098     : m_jit(0)
1099     , m_gpr(InvalidGPRReg)
1100 {
1101 }
1102
1103 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1104     : m_jit(jit)
1105     , m_gpr(InvalidGPRReg)
1106 {
1107     m_gpr = m_jit->allocate();
1108 }
1109
1110 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1111     : m_jit(jit)
1112     , m_gpr(InvalidGPRReg)
1113 {
1114     m_gpr = m_jit->allocate(specific);
1115 }
1116
1117 #if USE(JSVALUE32_64)
1118 GPRTemporary::GPRTemporary(
1119     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1120     : m_jit(jit)
1121     , m_gpr(InvalidGPRReg)
1122 {
1123     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1124         m_gpr = m_jit->reuse(op1.gpr(which));
1125     else
1126         m_gpr = m_jit->allocate();
1127 }
1128 #endif // USE(JSVALUE32_64)
1129
1130 JSValueRegsTemporary::JSValueRegsTemporary() { }
1131
1132 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1133 #if USE(JSVALUE64)
1134     : m_gpr(jit)
1135 #else
1136     : m_payloadGPR(jit)
1137     , m_tagGPR(jit)
1138 #endif
1139 {
1140 }
1141
1142 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1143
1144 JSValueRegs JSValueRegsTemporary::regs()
1145 {
1146 #if USE(JSVALUE64)
1147     return JSValueRegs(m_gpr.gpr());
1148 #else
1149     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1150 #endif
1151 }
1152
1153 void GPRTemporary::adopt(GPRTemporary& other)
1154 {
1155     ASSERT(!m_jit);
1156     ASSERT(m_gpr == InvalidGPRReg);
1157     ASSERT(other.m_jit);
1158     ASSERT(other.m_gpr != InvalidGPRReg);
1159     m_jit = other.m_jit;
1160     m_gpr = other.m_gpr;
1161     other.m_jit = 0;
1162     other.m_gpr = InvalidGPRReg;
1163 }
1164
1165 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1166     : m_jit(jit)
1167     , m_fpr(InvalidFPRReg)
1168 {
1169     m_fpr = m_jit->fprAllocate();
1170 }
1171
1172 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1173     : m_jit(jit)
1174     , m_fpr(InvalidFPRReg)
1175 {
1176     if (m_jit->canReuse(op1.node()))
1177         m_fpr = m_jit->reuse(op1.fpr());
1178     else
1179         m_fpr = m_jit->fprAllocate();
1180 }
1181
1182 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1183     : m_jit(jit)
1184     , m_fpr(InvalidFPRReg)
1185 {
1186     if (m_jit->canReuse(op1.node()))
1187         m_fpr = m_jit->reuse(op1.fpr());
1188     else if (m_jit->canReuse(op2.node()))
1189         m_fpr = m_jit->reuse(op2.fpr());
1190     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1191         m_fpr = m_jit->reuse(op1.fpr());
1192     else
1193         m_fpr = m_jit->fprAllocate();
1194 }
1195
1196 #if USE(JSVALUE32_64)
1197 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1198     : m_jit(jit)
1199     , m_fpr(InvalidFPRReg)
1200 {
1201     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1202         m_fpr = m_jit->reuse(op1.fpr());
1203     else
1204         m_fpr = m_jit->fprAllocate();
1205 }
1206 #endif
1207
1208 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1209 {
1210     BasicBlock* taken = branchNode->branchData()->taken.block;
1211     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1212     
1213     SpeculateDoubleOperand op1(this, node->child1());
1214     SpeculateDoubleOperand op2(this, node->child2());
1215     
1216     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1217     jump(notTaken);
1218 }
1219
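// Fused object-equality branch. If the masquerades-as-undefined watchpoint is still
// valid it is enough to speculate that both operands are objects and compare pointers.
// Otherwise the code also OSR exits when either operand has the MasqueradesAsUndefined
// type-info flag, so the plain pointer comparison only ever runs on ordinary objects.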
1220 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1221 {
1222     BasicBlock* taken = branchNode->branchData()->taken.block;
1223     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1224
1225     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1226     
1227     if (taken == nextBlock()) {
1228         condition = MacroAssembler::NotEqual;
1229         BasicBlock* tmp = taken;
1230         taken = notTaken;
1231         notTaken = tmp;
1232     }
1233
1234     SpeculateCellOperand op1(this, node->child1());
1235     SpeculateCellOperand op2(this, node->child2());
1236     
1237     GPRReg op1GPR = op1.gpr();
1238     GPRReg op2GPR = op2.gpr();
1239     
1240     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1241         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1242             speculationCheck(
1243                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1244         }
1245         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1246             speculationCheck(
1247                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1248         }
1249     } else {
1250         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1251             speculationCheck(
1252                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1253                 m_jit.branchIfNotObject(op1GPR));
1254         }
1255         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1256             m_jit.branchTest8(
1257                 MacroAssembler::NonZero, 
1258                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1259                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1260
1261         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1262             speculationCheck(
1263                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1264                 m_jit.branchIfNotObject(op2GPR));
1265         }
1266         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1267             m_jit.branchTest8(
1268                 MacroAssembler::NonZero, 
1269                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1270                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1271     }
1272
1273     branchPtr(condition, op1GPR, op2GPR, taken);
1274     jump(notTaken);
1275 }
1276
1277 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1278 {
1279     BasicBlock* taken = branchNode->branchData()->taken.block;
1280     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1281
1282     // The branch instruction will branch to the taken block.
1283     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1284     if (taken == nextBlock()) {
1285         condition = JITCompiler::invert(condition);
1286         BasicBlock* tmp = taken;
1287         taken = notTaken;
1288         notTaken = tmp;
1289     }
1290
1291     if (node->child1()->isBooleanConstant()) {
1292         bool imm = node->child1()->asBoolean();
1293         SpeculateBooleanOperand op2(this, node->child2());
1294         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1295     } else if (node->child2()->isBooleanConstant()) {
1296         SpeculateBooleanOperand op1(this, node->child1());
1297         bool imm = node->child2()->asBoolean();
1298         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1299     } else {
1300         SpeculateBooleanOperand op1(this, node->child1());
1301         SpeculateBooleanOperand op2(this, node->child2());
1302         branch32(condition, op1.gpr(), op2.gpr(), taken);
1303     }
1304
1305     jump(notTaken);
1306 }
1307
1308 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1309 {
1310     BasicBlock* taken = branchNode->branchData()->taken.block;
1311     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1312
1313     // The branch instruction will branch to the taken block.
1314     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1315     if (taken == nextBlock()) {
1316         condition = JITCompiler::invert(condition);
1317         BasicBlock* tmp = taken;
1318         taken = notTaken;
1319         notTaken = tmp;
1320     }
1321
1322     if (node->child1()->isInt32Constant()) {
1323         int32_t imm = node->child1()->asInt32();
1324         SpeculateInt32Operand op2(this, node->child2());
1325         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1326     } else if (node->child2()->isInt32Constant()) {
1327         SpeculateInt32Operand op1(this, node->child1());
1328         int32_t imm = node->child2()->asInt32();
1329         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1330     } else {
1331         SpeculateInt32Operand op1(this, node->child1());
1332         SpeculateInt32Operand op2(this, node->child2());
1333         branch32(condition, op1.gpr(), op2.gpr(), taken);
1334     }
1335
1336     jump(notTaken);
1337 }
1338
1339 // Returns true if the compare is fused with a subsequent branch.
1340 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1341 {
1342     // Fused compare & branch.
1343     unsigned branchIndexInBlock = detectPeepHoleBranch();
1344     if (branchIndexInBlock != UINT_MAX) {
1345         Node* branchNode = m_block->at(branchIndexInBlock);
1346
1347         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1348         // so there can be no intervening nodes that also reference the compare.
1349         ASSERT(node->adjustedRefCount() == 1);
1350
1351         if (node->isBinaryUseKind(Int32Use))
1352             compilePeepHoleInt32Branch(node, branchNode, condition);
1353 #if USE(JSVALUE64)
1354         else if (node->isBinaryUseKind(Int52RepUse))
1355             compilePeepHoleInt52Branch(node, branchNode, condition);
1356 #endif // USE(JSVALUE64)
1357         else if (node->isBinaryUseKind(DoubleRepUse))
1358             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1359         else if (node->op() == CompareEq) {
1360             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1361                 // Use non-peephole comparison, for now.
1362                 return false;
1363             }
1364             if (node->isBinaryUseKind(BooleanUse))
1365                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1366             else if (node->isBinaryUseKind(ObjectUse))
1367                 compilePeepHoleObjectEquality(node, branchNode);
1368             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1369                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1370             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1371                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1372             else {
1373                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1374                 return true;
1375             }
1376         } else {
1377             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1378             return true;
1379         }
1380
1381         use(node->child1());
1382         use(node->child2());
1383         m_indexInBlock = branchIndexInBlock;
1384         m_currentNode = branchNode;
1385         return true;
1386     }
1387     return false;
1388 }
1389
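// MovHint produces no machine code: it appends a movHint variable event recording that
// the given bytecode local is now represented by the child node's value, which is the
// information the OSR exit machinery consumes. noticeOSRBirth marks the child's value
// as live for OSR purposes in the same event stream.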
1390 void SpeculativeJIT::noticeOSRBirth(Node* node)
1391 {
1392     if (!node->hasVirtualRegister())
1393         return;
1394     
1395     VirtualRegister virtualRegister = node->virtualRegister();
1396     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1397     
1398     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1399 }
1400
1401 void SpeculativeJIT::compileMovHint(Node* node)
1402 {
1403     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1404     
1405     Node* child = node->child1().node();
1406     noticeOSRBirth(child);
1407     
1408     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1409 }
1410
1411 void SpeculativeJIT::bail(AbortReason reason)
1412 {
1413     if (verboseCompilationEnabled())
1414         dataLog("Bailing compilation.\n");
1415     m_compileOkay = true;
1416     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1417     clearGenerationInfo();
1418 }
1419
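// Compiles one basic block: record the block head label, plant a breakpoint for blocks
// that CFA proved unreachable, reset the abstract state, emit setLocal events describing
// the formats of the variables live at the block head, and then generate code for each
// node while running the abstract interpreter alongside, bailing out if the abstract
// state ever becomes contradictory.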
1420 void SpeculativeJIT::compileCurrentBlock()
1421 {
1422     ASSERT(m_compileOkay);
1423     
1424     if (!m_block)
1425         return;
1426     
1427     ASSERT(m_block->isReachable);
1428     
1429     m_jit.blockHeads()[m_block->index] = m_jit.label();
1430
1431     if (!m_block->intersectionOfCFAHasVisited) {
1432         // Don't generate code for basic blocks that are unreachable according to CFA.
1433         // But to be sure that nobody has generated a jump to this block, drop in a
1434         // breakpoint here.
1435         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1436         return;
1437     }
1438
1439     m_stream->appendAndLog(VariableEvent::reset());
1440     
1441     m_jit.jitAssertHasValidCallFrame();
1442     m_jit.jitAssertTagsInPlace();
1443     m_jit.jitAssertArgumentCountSane();
1444
1445     m_state.reset();
1446     m_state.beginBasicBlock(m_block);
1447     
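    // Replay, for every variable live at the head of this block, the stack format in which
    // it was flushed, so that OSR exit can reconstruct those locals.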
1448     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1449         int operand = m_block->variablesAtHead.operandForIndex(i);
1450         Node* node = m_block->variablesAtHead[i];
1451         if (!node)
1452             continue; // No need to record dead SetLocals.
1453         
1454         VariableAccessData* variable = node->variableAccessData();
1455         DataFormat format;
1456         if (!node->refCount())
1457             continue; // No need to record dead SetLocals.
1458         format = dataFormatFor(variable->flushFormat());
1459         m_stream->appendAndLog(
1460             VariableEvent::setLocal(
1461                 VirtualRegister(operand),
1462                 variable->machineLocal(),
1463                 format));
1464     }
1465     
1466     m_codeOriginForExitTarget = CodeOrigin();
1467     m_codeOriginForExitProfile = CodeOrigin();
1468     
1469     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1470         m_currentNode = m_block->at(m_indexInBlock);
1471         
1472         // We may have hit a contradiction that the CFA was aware of but that the JIT
1473         // didn't cause directly.
1474         if (!m_state.isValid()) {
1475             bail(DFGBailedAtTopOfBlock);
1476             return;
1477         }
1478
1479         if (ASSERT_DISABLED)
1480             m_canExit = true; // Essentially disable the assertions.
1481         else
1482             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1483         
1484         m_interpreter.startExecuting();
1485         m_jit.setForNode(m_currentNode);
1486         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1487         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1488         m_lastGeneratedNode = m_currentNode->op();
1489         
1490         ASSERT(m_currentNode->shouldGenerate());
1491         
1492         if (verboseCompilationEnabled()) {
1493             dataLogF(
1494                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1495                 (int)m_currentNode->index(),
1496                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1497             dataLog("\n");
1498         }
1499         
1500         compile(m_currentNode);
1501         
1502         if (belongsInMinifiedGraph(m_currentNode->op()))
1503             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1504         
1505 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1506         m_jit.clearRegisterAllocationOffsets();
1507 #endif
1508         
1509         if (!m_compileOkay) {
1510             bail(DFGBailedAtEndOfNode);
1511             return;
1512         }
1513         
1514         // Make sure that the abstract state is rematerialized for the next node.
1515         m_interpreter.executeEffects(m_indexInBlock);
1516     }
1517     
1518     // Perform the most basic verification that children have been used correctly.
1519     if (!ASSERT_DISABLED) {
1520         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1521             GenerationInfo& info = m_generationInfo[index];
1522             RELEASE_ASSERT(!info.alive());
1523         }
1524     }
1525 }
1526
1527 // If we are making type predictions about our arguments, then we need to check
1528 // that they are correct on function entry.
1529 void SpeculativeJIT::checkArgumentTypes()
1530 {
1531     ASSERT(!m_currentNode);
1532     m_isCheckingArgumentTypes = true;
1533     m_codeOriginForExitTarget = CodeOrigin(0);
1534     m_codeOriginForExitProfile = CodeOrigin(0);
1535
1536     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1537         Node* node = m_jit.graph().m_arguments[i];
1538         if (!node) {
1539             // The argument is dead. We don't do any checks for such arguments.
1540             continue;
1541         }
1542         
1543         ASSERT(node->op() == SetArgument);
1544         ASSERT(node->shouldGenerate());
1545
1546         VariableAccessData* variableAccessData = node->variableAccessData();
1547         FlushFormat format = variableAccessData->flushFormat();
1548         
1549         if (format == FlushedJSValue)
1550             continue;
1551         
1552         VirtualRegister virtualRegister = variableAccessData->local();
1553
1554         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1555         
1556 #if USE(JSVALUE64)
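             // On 64-bit these checks rely on the NaN-boxed JSValue encoding (see JSCJSValue.h):
             // boxed int32s compare unsigned >= the tag-type-number mask, cells have no tag bits
             // set, and false/true are ValueFalse/ValueTrue, which differ only in the low bit
             // (hence the xor-and-test below).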
1557         switch (format) {
1558         case FlushedInt32: {
1559             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1560             break;
1561         }
1562         case FlushedBoolean: {
1563             GPRTemporary temp(this);
1564             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1565             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1566             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1567             break;
1568         }
1569         case FlushedCell: {
1570             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1571             break;
1572         }
1573         default:
1574             RELEASE_ASSERT_NOT_REACHED();
1575             break;
1576         }
1577 #else
1578         switch (format) {
1579         case FlushedInt32: {
1580             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1581             break;
1582         }
1583         case FlushedBoolean: {
1584             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1585             break;
1586         }
1587         case FlushedCell: {
1588             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1589             break;
1590         }
1591         default:
1592             RELEASE_ASSERT_NOT_REACHED();
1593             break;
1594         }
1595 #endif
1596     }
1597     m_isCheckingArgumentTypes = false;
1598 }
1599
1600 bool SpeculativeJIT::compile()
1601 {
1602     checkArgumentTypes();
1603     
1604     ASSERT(!m_currentNode);
1605     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1606         m_jit.setForBlockIndex(blockIndex);
1607         m_block = m_jit.graph().block(blockIndex);
1608         compileCurrentBlock();
1609     }
1610     linkBranches();
1611     return true;
1612 }
1613
1614 void SpeculativeJIT::createOSREntries()
1615 {
1616     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1617         BasicBlock* block = m_jit.graph().block(blockIndex);
1618         if (!block)
1619             continue;
1620         if (!block->isOSRTarget)
1621             continue;
1622         
1623         // Currently we don't have OSR entry trampolines. We could add them
1624         // here if need be.
1625         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1626     }
1627 }
1628
1629 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1630 {
1631     unsigned osrEntryIndex = 0;
1632     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1633         BasicBlock* block = m_jit.graph().block(blockIndex);
1634         if (!block)
1635             continue;
1636         if (!block->isOSRTarget)
1637             continue;
1638         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1639     }
1640     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1641     
1642     if (verboseCompilationEnabled()) {
1643         DumpContext dumpContext;
1644         dataLog("OSR Entries:\n");
1645         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1646             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1647         if (!dumpContext.isEmpty())
1648             dumpContext.dump(WTF::dataFile());
1649     }
1650 }
1651
1652 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1653 {
1654     Edge child3 = m_jit.graph().varArgChild(node, 2);
1655     Edge child4 = m_jit.graph().varArgChild(node, 3);
1656
1657     ArrayMode arrayMode = node->arrayMode();
1658     
1659     GPRReg baseReg = base.gpr();
1660     GPRReg propertyReg = property.gpr();
1661     
1662     SpeculateDoubleOperand value(this, child3);
1663
1664     FPRReg valueReg = value.fpr();
1665     
1666     DFG_TYPE_CHECK(
1667         JSValueRegs(), child3, SpecFullRealNumber,
1668         m_jit.branchDouble(
1669             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1670     
1671     if (!m_compileOkay)
1672         return;
1673     
1674     StorageOperand storage(this, child4);
1675     GPRReg storageReg = storage.gpr();
1676
1677     if (node->op() == PutByValAlias) {
1678         // Store the value to the array.
1679         GPRReg propertyReg = property.gpr();
1680         FPRReg valueReg = value.fpr();
1681         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1682         
1683         noResult(m_currentNode);
1684         return;
1685     }
1686     
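    // When the access is not known to be in bounds, the store below may have to bump the
    // butterfly's public length; stores past the vector length go to the C++ slow path.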
1687     GPRTemporary temporary;
1688     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1689
1690     MacroAssembler::Jump slowCase;
1691     
1692     if (arrayMode.isInBounds()) {
1693         speculationCheck(
1694             OutOfBounds, JSValueRegs(), 0,
1695             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1696     } else {
1697         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1698         
1699         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1700         
1701         if (!arrayMode.isOutOfBounds())
1702             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1703         
1704         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1705         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1706         
1707         inBounds.link(&m_jit);
1708     }
1709     
1710     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1711
1712     base.use();
1713     property.use();
1714     value.use();
1715     storage.use();
1716     
1717     if (arrayMode.isOutOfBounds()) {
1718         addSlowPathGenerator(
1719             slowPathCall(
1720                 slowCase, this,
1721                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1722                 NoResult, baseReg, propertyReg, valueReg));
1723     }
1724
1725     noResult(m_currentNode, UseChildrenCalledExplicitly);
1726 }
1727
1728 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1729 {
1730     SpeculateCellOperand string(this, node->child1());
1731     SpeculateStrictInt32Operand index(this, node->child2());
1732     StorageOperand storage(this, node->child3());
1733
1734     GPRReg stringReg = string.gpr();
1735     GPRReg indexReg = index.gpr();
1736     GPRReg storageReg = storage.gpr();
1737     
1738     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1739
1740     // unsigned comparison so we can filter out negative indices and indices that are too large
1741     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1742
1743     GPRTemporary scratch(this);
1744     GPRReg scratchReg = scratch.gpr();
1745
1746     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1747
1748     // Load the character into scratchReg
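    // (StringImpl stores its characters either as 8-bit LChars or 16-bit UChars; the
    // flagIs8Bit() bit picks which of the two loads below we take.)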
1749     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1750
1751     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1752     JITCompiler::Jump cont8Bit = m_jit.jump();
1753
1754     is16Bit.link(&m_jit);
1755
1756     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1757
1758     cont8Bit.link(&m_jit);
1759
1760     int32Result(scratchReg, m_currentNode);
1761 }
1762
1763 void SpeculativeJIT::compileGetByValOnString(Node* node)
1764 {
1765     SpeculateCellOperand base(this, node->child1());
1766     SpeculateStrictInt32Operand property(this, node->child2());
1767     StorageOperand storage(this, node->child3());
1768     GPRReg baseReg = base.gpr();
1769     GPRReg propertyReg = property.gpr();
1770     GPRReg storageReg = storage.gpr();
1771
1772     GPRTemporary scratch(this);
1773     GPRReg scratchReg = scratch.gpr();
1774 #if USE(JSVALUE32_64)
1775     GPRTemporary resultTag;
1776     GPRReg resultTagReg = InvalidGPRReg;
1777     if (node->arrayMode().isOutOfBounds()) {
1778         GPRTemporary realResultTag(this);
1779         resultTag.adopt(realResultTag);
1780         resultTagReg = resultTag.gpr();
1781     }
1782 #endif
1783
1784     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1785
1786     // unsigned comparison so we can filter out negative indices and indices that are too large
1787     JITCompiler::Jump outOfBounds = m_jit.branch32(
1788         MacroAssembler::AboveOrEqual, propertyReg,
1789         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1790     if (node->arrayMode().isInBounds())
1791         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1792
1793     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1794
1795     // Load the character into scratchReg
1796     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1797
1798     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1799     JITCompiler::Jump cont8Bit = m_jit.jump();
1800
1801     is16Bit.link(&m_jit);
1802
1803     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1804
1805     JITCompiler::Jump bigCharacter =
1806         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1807
1808     // 8 bit string values don't need the isASCII check.
1809     cont8Bit.link(&m_jit);
1810
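    // Index the VM's table of preallocated single-character strings: scale the character code
    // by the pointer size (shift by 2 or 3) and load the corresponding JSString*.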
1811     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1812     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1813     m_jit.loadPtr(scratchReg, scratchReg);
1814
1815     addSlowPathGenerator(
1816         slowPathCall(
1817             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1818
1819     if (node->arrayMode().isOutOfBounds()) {
1820 #if USE(JSVALUE32_64)
1821         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1822 #endif
1823
1824         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1825         if (globalObject->stringPrototypeChainIsSane()) {
1826             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1827             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1828             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1829             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1830             // indexed properties either.
1831             // https://bugs.webkit.org/show_bug.cgi?id=144668
1832             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1833             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1834             
1835 #if USE(JSVALUE64)
1836             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1837                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1838 #else
1839             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1840                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1841                 baseReg, propertyReg));
1842 #endif
1843         } else {
1844 #if USE(JSVALUE64)
1845             addSlowPathGenerator(
1846                 slowPathCall(
1847                     outOfBounds, this, operationGetByValStringInt,
1848                     scratchReg, baseReg, propertyReg));
1849 #else
1850             addSlowPathGenerator(
1851                 slowPathCall(
1852                     outOfBounds, this, operationGetByValStringInt,
1853                     resultTagReg, scratchReg, baseReg, propertyReg));
1854 #endif
1855         }
1856         
1857 #if USE(JSVALUE64)
1858         jsValueResult(scratchReg, m_currentNode);
1859 #else
1860         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1861 #endif
1862     } else
1863         cellResult(scratchReg, m_currentNode);
1864 }
1865
1866 void SpeculativeJIT::compileFromCharCode(Node* node)
1867 {
1868     SpeculateStrictInt32Operand property(this, node->child1());
1869     GPRReg propertyReg = property.gpr();
1870     GPRTemporary smallStrings(this);
1871     GPRTemporary scratch(this);
1872     GPRReg scratchReg = scratch.gpr();
1873     GPRReg smallStringsReg = smallStrings.gpr();
1874
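    // Character codes at or above 0xff, and table entries that have not been materialized yet
    // (null slots), fall back to operationStringFromCharCode below.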
1875     JITCompiler::JumpList slowCases;
1876     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1877     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1878     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1879
1880     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1881     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1882     cellResult(scratchReg, m_currentNode);
1883 }
1884
1885 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1886 {
1887     VirtualRegister virtualRegister = node->virtualRegister();
1888     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1889
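    // Pick a conversion strategy from the operand's current register format: values that are
    // already integers can skip the generic JSValue-to-int path entirely.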
1890     switch (info.registerFormat()) {
1891     case DataFormatStorage:
1892         RELEASE_ASSERT_NOT_REACHED();
1893
1894     case DataFormatBoolean:
1895     case DataFormatCell:
1896         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1897         return GeneratedOperandTypeUnknown;
1898
1899     case DataFormatNone:
1900     case DataFormatJSCell:
1901     case DataFormatJS:
1902     case DataFormatJSBoolean:
1903     case DataFormatJSDouble:
1904         return GeneratedOperandJSValue;
1905
1906     case DataFormatJSInt32:
1907     case DataFormatInt32:
1908         return GeneratedOperandInteger;
1909
1910     default:
1911         RELEASE_ASSERT_NOT_REACHED();
1912         return GeneratedOperandTypeUnknown;
1913     }
1914 }
1915
1916 void SpeculativeJIT::compileValueToInt32(Node* node)
1917 {
1918     switch (node->child1().useKind()) {
1919 #if USE(JSVALUE64)
1920     case Int52RepUse: {
1921         SpeculateStrictInt52Operand op1(this, node->child1());
1922         GPRTemporary result(this, Reuse, op1);
1923         GPRReg op1GPR = op1.gpr();
1924         GPRReg resultGPR = result.gpr();
1925         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1926         int32Result(resultGPR, node, DataFormatInt32);
1927         return;
1928     }
1929 #endif // USE(JSVALUE64)
1930         
1931     case DoubleRepUse: {
1932         GPRTemporary result(this);
1933         SpeculateDoubleOperand op1(this, node->child1());
1934         FPRReg fpr = op1.fpr();
1935         GPRReg gpr = result.gpr();
1936         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1937         
1938         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1939         
1940         int32Result(gpr, node);
1941         return;
1942     }
1943     
1944     case NumberUse:
1945     case NotCellUse: {
1946         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1947         case GeneratedOperandInteger: {
1948             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1949             GPRTemporary result(this, Reuse, op1);
1950             m_jit.move(op1.gpr(), result.gpr());
1951             int32Result(result.gpr(), node, op1.format());
1952             return;
1953         }
1954         case GeneratedOperandJSValue: {
1955             GPRTemporary result(this);
1956 #if USE(JSVALUE64)
1957             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1958
1959             GPRReg gpr = op1.gpr();
1960             GPRReg resultGpr = result.gpr();
1961             FPRTemporary tempFpr(this);
1962             FPRReg fpr = tempFpr.fpr();
1963
1964             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1965             JITCompiler::JumpList converted;
1966
1967             if (node->child1().useKind() == NumberUse) {
1968                 DFG_TYPE_CHECK(
1969                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1970                     m_jit.branchTest64(
1971                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1972             } else {
1973                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1974                 
1975                 DFG_TYPE_CHECK(
1976                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1977                 
1978                 // It's not a cell: so true turns into 1 and all else turns into 0.
1979                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1980                 converted.append(m_jit.jump());
1981                 
1982                 isNumber.link(&m_jit);
1983             }
1984
1985             // If we get here, we have a double encoded as a JSValue.
1986             m_jit.move(gpr, resultGpr);
1987             unboxDouble(resultGpr, fpr);
1988
1989             silentSpillAllRegisters(resultGpr);
1990             callOperation(toInt32, resultGpr, fpr);
1991             silentFillAllRegisters(resultGpr);
1992
1993             converted.append(m_jit.jump());
1994
1995             isInteger.link(&m_jit);
1996             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1997
1998             converted.link(&m_jit);
1999 #else
2000             Node* childNode = node->child1().node();
2001             VirtualRegister virtualRegister = childNode->virtualRegister();
2002             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2003
2004             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2005
2006             GPRReg payloadGPR = op1.payloadGPR();
2007             GPRReg resultGpr = result.gpr();
2008         
2009             JITCompiler::JumpList converted;
2010
2011             if (info.registerFormat() == DataFormatJSInt32)
2012                 m_jit.move(payloadGPR, resultGpr);
2013             else {
2014                 GPRReg tagGPR = op1.tagGPR();
2015                 FPRTemporary tempFpr(this);
2016                 FPRReg fpr = tempFpr.fpr();
2017                 FPRTemporary scratch(this);
2018
2019                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2020
2021                 if (node->child1().useKind() == NumberUse) {
2022                     DFG_TYPE_CHECK(
2023                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2024                         m_jit.branch32(
2025                             MacroAssembler::AboveOrEqual, tagGPR,
2026                             TrustedImm32(JSValue::LowestTag)));
2027                 } else {
2028                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2029                     
2030                     DFG_TYPE_CHECK(
2031                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2032                         m_jit.branchIfCell(op1.jsValueRegs()));
2033                     
2034                     // It's not a cell: so true turns into 1 and all else turns into 0.
2035                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2036                     m_jit.move(TrustedImm32(0), resultGpr);
2037                     converted.append(m_jit.jump());
2038                     
2039                     isBoolean.link(&m_jit);
2040                     m_jit.move(payloadGPR, resultGpr);
2041                     converted.append(m_jit.jump());
2042                     
2043                     isNumber.link(&m_jit);
2044                 }
2045
2046                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2047
2048                 silentSpillAllRegisters(resultGpr);
2049                 callOperation(toInt32, resultGpr, fpr);
2050                 silentFillAllRegisters(resultGpr);
2051
2052                 converted.append(m_jit.jump());
2053
2054                 isInteger.link(&m_jit);
2055                 m_jit.move(payloadGPR, resultGpr);
2056
2057                 converted.link(&m_jit);
2058             }
2059 #endif
2060             int32Result(resultGpr, node);
2061             return;
2062         }
2063         case GeneratedOperandTypeUnknown:
2064             RELEASE_ASSERT(!m_compileOkay);
2065             return;
2066         }
2067         RELEASE_ASSERT_NOT_REACHED();
2068         return;
2069     }
2070     
2071     default:
2072         ASSERT(!m_compileOkay);
2073         return;
2074     }
2075 }
2076
2077 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2078 {
2079     if (doesOverflow(node->arithMode())) {
2080         // We know that this sometimes produces doubles. So produce a double every
2081         // time. This at least allows subsequent code to not have weird conditionals.
2082             
2083         SpeculateInt32Operand op1(this, node->child1());
2084         FPRTemporary result(this);
2085             
2086         GPRReg inputGPR = op1.gpr();
2087         FPRReg outputFPR = result.fpr();
2088             
2089         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2090             
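             // If the int32 bit pattern was really an unsigned value >= 2^31, the conversion above
             // produced a negative double; adding 2^32 below recovers the unsigned interpretation
             // (e.g. an input of -1, i.e. uint32 0xFFFFFFFF, becomes -1.0 and then 4294967295.0).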
2091         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2092         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2093         positive.link(&m_jit);
2094             
2095         doubleResult(outputFPR, node);
2096         return;
2097     }
2098     
2099     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2100
2101     SpeculateInt32Operand op1(this, node->child1());
2102     GPRTemporary result(this);
2103
2104     m_jit.move(op1.gpr(), result.gpr());
2105
2106     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2107
2108     int32Result(result.gpr(), node, op1.format());
2109 }
2110
2111 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2112 {
2113     SpeculateDoubleOperand op1(this, node->child1());
2114     FPRTemporary scratch(this);
2115     GPRTemporary result(this);
2116     
2117     FPRReg valueFPR = op1.fpr();
2118     FPRReg scratchFPR = scratch.fpr();
2119     GPRReg resultGPR = result.gpr();
2120
2121     JITCompiler::JumpList failureCases;
2122     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2123     m_jit.branchConvertDoubleToInt32(
2124         valueFPR, resultGPR, failureCases, scratchFPR,
2125         shouldCheckNegativeZero(node->arithMode()));
2126     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2127
2128     int32Result(resultGPR, node);
2129 }
2130
2131 void SpeculativeJIT::compileDoubleRep(Node* node)
2132 {
2133     switch (node->child1().useKind()) {
2134     case RealNumberUse: {
2135         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2136         FPRTemporary result(this);
2137         
2138         JSValueRegs op1Regs = op1.jsValueRegs();
2139         FPRReg resultFPR = result.fpr();
2140         
2141 #if USE(JSVALUE64)
2142         GPRTemporary temp(this);
2143         GPRReg tempGPR = temp.gpr();
2144         m_jit.move(op1Regs.gpr(), tempGPR);
2145         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2146 #else
2147         FPRTemporary temp(this);
2148         FPRReg tempFPR = temp.fpr();
2149         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2150 #endif
2151         
2152         JITCompiler::Jump done = m_jit.branchDouble(
2153             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2154         
2155         DFG_TYPE_CHECK(
2156             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2157         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2158         
2159         done.link(&m_jit);
2160         
2161         doubleResult(resultFPR, node);
2162         return;
2163     }
2164     
2165     case NotCellUse:
2166     case NumberUse: {
2167         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2168
2169         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2170         if (isInt32Speculation(possibleTypes)) {
2171             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2172             FPRTemporary result(this);
2173             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2174             doubleResult(result.fpr(), node);
2175             return;
2176         }
2177
2178         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2179         FPRTemporary result(this);
2180
2181 #if USE(JSVALUE64)
2182         GPRTemporary temp(this);
2183
2184         GPRReg op1GPR = op1.gpr();
2185         GPRReg tempGPR = temp.gpr();
2186         FPRReg resultFPR = result.fpr();
2187         JITCompiler::JumpList done;
2188
2189         JITCompiler::Jump isInteger = m_jit.branch64(
2190             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2191
2192         if (node->child1().useKind() == NotCellUse) {
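                 // Speculated to be neither a cell nor a number: null and false convert to 0,
                 // true converts to 1, and undefined converts to NaN, matching ToNumber.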
2193             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2194             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2195
2196             static const double zero = 0;
2197             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2198
2199             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2200             done.append(isNull);
2201
2202             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2203                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2204
2205             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2206             static const double one = 1;
2207             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
                 done.append(m_jit.jump()); // The "true" case falls through with 1 in resultFPR; skip the NaN path below.
2208             done.append(isFalse);
2209
2210             isUndefined.link(&m_jit);
2211             static const double NaN = PNaN;
2212             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2213             done.append(m_jit.jump());
2214
2215             isNumber.link(&m_jit);
2216         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2217             typeCheck(
2218                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2219                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2220         }
2221     
2222         m_jit.move(op1GPR, tempGPR);
2223         unboxDouble(tempGPR, resultFPR);
2224         done.append(m_jit.jump());
2225     
2226         isInteger.link(&m_jit);
2227         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2228         done.link(&m_jit);
2229 #else // USE(JSVALUE64) -> this is the 32_64 case
2230         FPRTemporary temp(this);
2231     
2232         GPRReg op1TagGPR = op1.tagGPR();
2233         GPRReg op1PayloadGPR = op1.payloadGPR();
2234         FPRReg tempFPR = temp.fpr();
2235         FPRReg resultFPR = result.fpr();
2236         JITCompiler::JumpList done;
2237     
2238         JITCompiler::Jump isInteger = m_jit.branch32(
2239             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2240
2241         if (node->child1().useKind() == NotCellUse) {
2242             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2243             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2244
2245             static const double zero = 0;
2246             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2247
2248             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2249             done.append(isNull);
2250
2251             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2252
2253             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2254             static const double one = 1;
2255             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
                 done.append(m_jit.jump()); // As above: "true" must not fall into the NaN (undefined) case.
2256             done.append(isFalse);
2257
2258             isUndefined.link(&m_jit);
2259             static const double NaN = PNaN;
2260             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2261             done.append(m_jit.jump());
2262
2263             isNumber.link(&m_jit);
2264         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2265             typeCheck(
2266                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2267                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2268         }
2269
2270         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2271         done.append(m_jit.jump());
2272     
2273         isInteger.link(&m_jit);
2274         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2275         done.link(&m_jit);
2276 #endif // USE(JSVALUE64)
2277     
2278         doubleResult(resultFPR, node);
2279         return;
2280     }
2281         
2282 #if USE(JSVALUE64)
2283     case Int52RepUse: {
2284         SpeculateStrictInt52Operand value(this, node->child1());
2285         FPRTemporary result(this);
2286         
2287         GPRReg valueGPR = value.gpr();
2288         FPRReg resultFPR = result.fpr();
2289
2290         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2291         
2292         doubleResult(resultFPR, node);
2293         return;
2294     }
2295 #endif // USE(JSVALUE64)
2296         
2297     default:
2298         RELEASE_ASSERT_NOT_REACHED();
2299         return;
2300     }
2301 }
2302
2303 void SpeculativeJIT::compileValueRep(Node* node)
2304 {
2305     switch (node->child1().useKind()) {
2306     case DoubleRepUse: {
2307         SpeculateDoubleOperand value(this, node->child1());
2308         JSValueRegsTemporary result(this);
2309         
2310         FPRReg valueFPR = value.fpr();
2311         JSValueRegs resultRegs = result.regs();
2312         
2313         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2314         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2315         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2316         // local was purified.
2317         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2318             m_jit.purifyNaN(valueFPR);
2319
2320         boxDouble(valueFPR, resultRegs);
2321         
2322         jsValueResult(resultRegs, node);
2323         return;
2324     }
2325         
2326 #if USE(JSVALUE64)
2327     case Int52RepUse: {
2328         SpeculateStrictInt52Operand value(this, node->child1());
2329         GPRTemporary result(this);
2330         
2331         GPRReg valueGPR = value.gpr();
2332         GPRReg resultGPR = result.gpr();
2333         
2334         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2335         
2336         jsValueResult(resultGPR, node);
2337         return;
2338     }
2339 #endif // USE(JSVALUE64)
2340         
2341     default:
2342         RELEASE_ASSERT_NOT_REACHED();
2343         return;
2344     }
2345 }
2346
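// Clamp d to [0, 255] after adding 0.5 (callers truncate, so this rounds half-up); NaN fails
// the (d > 0) test and therefore clamps to 0. Used for clamped typed-array stores.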
2347 static double clampDoubleToByte(double d)
2348 {
2349     d += 0.5;
2350     if (!(d > 0))
2351         d = 0;
2352     else if (d > 255)
2353         d = 255;
2354     return d;
2355 }
2356
2357 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2358 {
2359     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2360     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2361     jit.xorPtr(result, result);
2362     MacroAssembler::Jump clamped = jit.jump();
2363     tooBig.link(&jit);
2364     jit.move(JITCompiler::TrustedImm32(255), result);
2365     clamped.link(&jit);
2366     inBounds.link(&jit);
2367 }
2368
2369 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2370 {
2371     // Unordered compare so we pick up NaN
2372     static const double zero = 0;
2373     static const double byteMax = 255;
2374     static const double half = 0.5;
2375     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2376     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2377     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2378     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2379     
2380     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2381     // FIXME: This should probably just use a floating point round!
2382     // https://bugs.webkit.org/show_bug.cgi?id=72054
2383     jit.addDouble(source, scratch);
2384     jit.truncateDoubleToInt32(scratch, result);
2385     MacroAssembler::Jump truncatedInt = jit.jump();
2386     
2387     tooSmall.link(&jit);
2388     jit.xorPtr(result, result);
2389     MacroAssembler::Jump zeroed = jit.jump();
2390     
2391     tooBig.link(&jit);
2392     jit.move(JITCompiler::TrustedImm32(255), result);
2393     
2394     truncatedInt.link(&jit);
2395     zeroed.link(&jit);
2396
2397 }
2398
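// Returns an unset Jump when no bounds check is needed: PutByValAlias has already been
// checked, and a constant index into a view of known length can be proven in bounds.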
2399 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2400 {
2401     if (node->op() == PutByValAlias)
2402         return JITCompiler::Jump();
2403     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2404         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2405     if (view) {
2406         uint32_t length = view->length();
2407         Node* indexNode = m_jit.graph().child(node, 1).node();
2408         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2409             return JITCompiler::Jump();
2410         return m_jit.branch32(
2411             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2412     }
2413     return m_jit.branch32(
2414         MacroAssembler::AboveOrEqual, indexGPR,
2415         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2416 }
2417
2418 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2419 {
2420     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2421     if (!jump.isSet())
2422         return;
2423     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2424 }
2425
2426 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2427 {
2428     ASSERT(isInt(type));
2429     
2430     SpeculateCellOperand base(this, node->child1());
2431     SpeculateStrictInt32Operand property(this, node->child2());
2432     StorageOperand storage(this, node->child3());
2433
2434     GPRReg baseReg = base.gpr();
2435     GPRReg propertyReg = property.gpr();
2436     GPRReg storageReg = storage.gpr();
2437
2438     GPRTemporary result(this);
2439     GPRReg resultReg = result.gpr();
2440
2441     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2442
2443     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2444     switch (elementSize(type)) {
2445     case 1:
2446         if (isSigned(type))
2447             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2448         else
2449             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2450         break;
2451     case 2:
2452         if (isSigned(type))
2453             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2454         else
2455             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2456         break;
2457     case 4:
2458         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2459         break;
2460     default:
2461         CRASH();
2462     }
2463     if (elementSize(type) < 4 || isSigned(type)) {
2464         int32Result(resultReg, node);
2465         return;
2466     }
2467     
2468     ASSERT(elementSize(type) == 4 && !isSigned(type));
2469     if (node->shouldSpeculateInt32()) {
2470         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2471         int32Result(resultReg, node);
2472         return;
2473     }
2474     
2475 #if USE(JSVALUE64)
2476     if (node->shouldSpeculateMachineInt()) {
2477         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2478         strictInt52Result(resultReg, node);
2479         return;
2480     }
2481 #endif
2482     
2483     FPRTemporary fresult(this);
2484     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2485     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2486     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2487     positive.link(&m_jit);
2488     doubleResult(fresult.fpr(), node);
2489 }
2490
2491 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2492 {
2493     ASSERT(isInt(type));
2494     
2495     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2496     GPRReg storageReg = storage.gpr();
2497     
2498     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2499     
2500     GPRTemporary value;
2501     GPRReg valueGPR = InvalidGPRReg;
2502     
2503     if (valueUse->isConstant()) {
2504         JSValue jsValue = valueUse->asJSValue();
2505         if (!jsValue.isNumber()) {
2506             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2507             noResult(node);
2508             return;
2509         }
2510         double d = jsValue.asNumber();
2511         if (isClamped(type)) {
2512             ASSERT(elementSize(type) == 1);
2513             d = clampDoubleToByte(d);
2514         }
2515         GPRTemporary scratch(this);
2516         GPRReg scratchReg = scratch.gpr();
2517         m_jit.move(Imm32(toInt32(d)), scratchReg);
2518         value.adopt(scratch);
2519         valueGPR = scratchReg;
2520     } else {
2521         switch (valueUse.useKind()) {
2522         case Int32Use: {
2523             SpeculateInt32Operand valueOp(this, valueUse);
2524             GPRTemporary scratch(this);
2525             GPRReg scratchReg = scratch.gpr();
2526             m_jit.move(valueOp.gpr(), scratchReg);
2527             if (isClamped(type)) {
2528                 ASSERT(elementSize(type) == 1);
2529                 compileClampIntegerToByte(m_jit, scratchReg);
2530             }
2531             value.adopt(scratch);
2532             valueGPR = scratchReg;
2533             break;
2534         }
2535             
2536 #if USE(JSVALUE64)
2537         case Int52RepUse: {
2538             SpeculateStrictInt52Operand valueOp(this, valueUse);
2539             GPRTemporary scratch(this);
2540             GPRReg scratchReg = scratch.gpr();
2541             m_jit.move(valueOp.gpr(), scratchReg);
2542             if (isClamped(type)) {
2543                 ASSERT(elementSize(type) == 1);
2544                 MacroAssembler::Jump inBounds = m_jit.branch64(
2545                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2546                 MacroAssembler::Jump tooBig = m_jit.branch64(
2547                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2548                 m_jit.move(TrustedImm32(0), scratchReg);
2549                 MacroAssembler::Jump clamped = m_jit.jump();
2550                 tooBig.link(&m_jit);
2551                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2552                 clamped.link(&m_jit);
2553                 inBounds.link(&m_jit);
2554             }
2555             value.adopt(scratch);
2556             valueGPR = scratchReg;
2557             break;
2558         }
2559 #endif // USE(JSVALUE64)
2560             
2561         case DoubleRepUse: {
2562             if (isClamped(type)) {
2563                 ASSERT(elementSize(type) == 1);
2564                 SpeculateDoubleOperand valueOp(this, valueUse);
2565                 GPRTemporary result(this);
2566                 FPRTemporary floatScratch(this);
2567                 FPRReg fpr = valueOp.fpr();
2568                 GPRReg gpr = result.gpr();
2569                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2570                 value.adopt(result);
2571                 valueGPR = gpr;
2572             } else {
2573                 SpeculateDoubleOperand valueOp(this, valueUse);
2574                 GPRTemporary result(this);
2575                 FPRReg fpr = valueOp.fpr();
2576                 GPRReg gpr = result.gpr();
2577                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2578                 m_jit.xorPtr(gpr, gpr);
2579                 MacroAssembler::Jump fixed = m_jit.jump();
2580                 notNaN.link(&m_jit);
2581                 
2582                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2583                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2584                 
2585                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2586                 
2587                 fixed.link(&m_jit);
2588                 value.adopt(result);
2589                 valueGPR = gpr;
2590             }
2591             break;
2592         }
2593             
2594         default:
2595             RELEASE_ASSERT_NOT_REACHED();
2596             break;
2597         }
2598     }
2599     
2600     ASSERT_UNUSED(valueGPR, valueGPR != property);
2601     ASSERT(valueGPR != base);
2602     ASSERT(valueGPR != storageReg);
2603     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2604     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2605         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2606         outOfBounds = MacroAssembler::Jump();
2607     }
2608
2609     switch (elementSize(type)) {
2610     case 1:
2611         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2612         break;
2613     case 2:
2614         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2615         break;
2616     case 4:
2617         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2618         break;
2619     default:
2620         CRASH();
2621     }
2622     if (outOfBounds.isSet())
2623         outOfBounds.link(&m_jit);
2624     noResult(node);
2625 }
2626
2627 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2628 {
2629     ASSERT(isFloat(type));
2630     
2631     SpeculateCellOperand base(this, node->child1());
2632     SpeculateStrictInt32Operand property(this, node->child2());
2633     StorageOperand storage(this, node->child3());
2634
2635     GPRReg baseReg = base.gpr();
2636     GPRReg propertyReg = property.gpr();
2637     GPRReg storageReg = storage.gpr();
2638
2639     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2640
2641     FPRTemporary result(this);
2642     FPRReg resultReg = result.fpr();
2643     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2644     switch (elementSize(type)) {
2645     case 4:
2646         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2647         m_jit.convertFloatToDouble(resultReg, resultReg);
2648         break;
2649     case 8: {
2650         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2651         break;
2652     }
2653     default:
2654         RELEASE_ASSERT_NOT_REACHED();
2655     }
2656     
2657     doubleResult(resultReg, node);
2658 }
2659
2660 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2661 {
2662     ASSERT(isFloat(type));
2663     
2664     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2665     GPRReg storageReg = storage.gpr();
2666     
2667     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2668     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2669
2670     SpeculateDoubleOperand valueOp(this, valueUse);
2671     FPRTemporary scratch(this);
2672     FPRReg valueFPR = valueOp.fpr();
2673     FPRReg scratchFPR = scratch.fpr();
2674
2675     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2676     
2677     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2678     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2679         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2680         outOfBounds = MacroAssembler::Jump();
2681     }
2682     
2683     switch (elementSize(type)) {
2684     case 4: {
2685         m_jit.moveDouble(valueFPR, scratchFPR);
2686         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2687         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2688         break;
2689     }
2690     case 8:
2691         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2692         break;
2693     default:
2694         RELEASE_ASSERT_NOT_REACHED();
2695     }
2696     if (outOfBounds.isSet())
2697         outOfBounds.link(&m_jit);
2698     noResult(node);
2699 }
2700
2701 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2702 {
2703     // Check that prototype is an object.
2704     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2705     
2706     // Initialize scratchReg with the value being checked.
2707     m_jit.move(valueReg, scratchReg);
2708     
2709     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2710     MacroAssembler::Label loop(&m_jit);
2711     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2712     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2713     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2714 #if USE(JSVALUE64)
2715     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2716 #else
2717     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2718 #endif
2719     
2720     // No match - result is false.
2721 #if USE(JSVALUE64)
2722     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2723 #else
2724     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2725 #endif
2726     MacroAssembler::Jump putResult = m_jit.jump();
2727     
2728     isInstance.link(&m_jit);
2729 #if USE(JSVALUE64)
2730     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2731 #else
2732     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2733 #endif
2734     
2735     putResult.link(&m_jit);
2736 }
2737
2738 void SpeculativeJIT::compileInstanceOf(Node* node)
2739 {
2740     if (node->child1().useKind() == UntypedUse) {
2741         // It might not be a cell. Speculate less aggressively.
2742         // Or: it might only be used once (i.e. by us), so we get zero benefit
2743         // from speculating any more aggressively than we absolutely need to.
2744         
2745         JSValueOperand value(this, node->child1());
2746         SpeculateCellOperand prototype(this, node->child2());
2747         GPRTemporary scratch(this);
2748         GPRTemporary scratch2(this);
2749         
2750         GPRReg prototypeReg = prototype.gpr();
2751         GPRReg scratchReg = scratch.gpr();
2752         GPRReg scratch2Reg = scratch2.gpr();
2753         
2754         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2755         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2756         moveFalseTo(scratchReg);
2757
2758         MacroAssembler::Jump done = m_jit.jump();
2759         
2760         isCell.link(&m_jit);
2761         
2762         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2763         
2764         done.link(&m_jit);
2765
2766         blessedBooleanResult(scratchReg, node);
2767         return;
2768     }
2769     
2770     SpeculateCellOperand value(this, node->child1());
2771     SpeculateCellOperand prototype(this, node->child2());
2772     
2773     GPRTemporary scratch(this);
2774     GPRTemporary scratch2(this);
2775     
2776     GPRReg valueReg = value.gpr();
2777     GPRReg prototypeReg = prototype.gpr();
2778     GPRReg scratchReg = scratch.gpr();
2779     GPRReg scratch2Reg = scratch2.gpr();
2780     
2781     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2782
2783     blessedBooleanResult(scratchReg, node);
2784 }
2785
2786 void SpeculativeJIT::compileAdd(Node* node)
2787 {
2788     switch (node->binaryUseKind()) {
2789     case Int32Use: {
2790         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2791         
2792         if (node->child1()->isInt32Constant()) {
2793             int32_t imm1 = node->child1()->asInt32();
2794             SpeculateInt32Operand op2(this, node->child2());
2795             GPRTemporary result(this);
2796
2797             if (!shouldCheckOverflow(node->arithMode())) {
2798                 m_jit.move(op2.gpr(), result.gpr());
2799                 m_jit.add32(Imm32(imm1), result.gpr());
2800             } else
2801                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2802
2803             int32Result(result.gpr(), node);
2804             return;
2805         }
2806         
2807         if (node->child2()->isInt32Constant()) {
2808             SpeculateInt32Operand op1(this, node->child1());
2809             int32_t imm2 = node->child2()->asInt32();
2810             GPRTemporary result(this);
2811                 
2812             if (!shouldCheckOverflow(node->arithMode())) {
2813                 m_jit.move(op1.gpr(), result.gpr());
2814                 m_jit.add32(Imm32(imm2), result.gpr());
2815             } else
2816                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2817
2818             int32Result(result.gpr(), node);
2819             return;
2820         }
2821                 
2822         SpeculateInt32Operand op1(this, node->child1());
2823         SpeculateInt32Operand op2(this, node->child2());
2824         GPRTemporary result(this, Reuse, op1, op2);
2825
2826         GPRReg gpr1 = op1.gpr();
2827         GPRReg gpr2 = op2.gpr();
2828         GPRReg gprResult = result.gpr();
2829
2830         if (!shouldCheckOverflow(node->arithMode())) {
2831             if (gpr1 == gprResult)
2832                 m_jit.add32(gpr2, gprResult);
2833             else {
2834                 m_jit.move(gpr2, gprResult);
2835                 m_jit.add32(gpr1, gprResult);
2836             }
2837         } else {
2838             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2839                 
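                 // If the result register aliases one of the operands, the add above has
                 // already clobbered that operand's original value, so attach a recovery
                 // (undo the add) for the OSR exit path; otherwise a plain check suffices.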
2840             if (gpr1 == gprResult)
2841                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2842             else if (gpr2 == gprResult)
2843                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2844             else
2845                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2846         }
2847
2848         int32Result(gprResult, node);
2849         return;
2850     }
2851         
2852 #if USE(JSVALUE64)
2853     case Int52RepUse: {
2854         ASSERT(shouldCheckOverflow(node->arithMode()));
2855         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2856
2857         // Will we need an overflow check? If we can prove that neither input can be
2858         // Int52 then the overflow check will not be necessary.
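             // (Informally: if neither child can be SpecInt52, both values are in int32
             // range, so their sum needs at most 33 bits and cannot overflow int52.)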
2859         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2860             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2861             SpeculateWhicheverInt52Operand op1(this, node->child1());
2862             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2863             GPRTemporary result(this, Reuse, op1);
2864             m_jit.move(op1.gpr(), result.gpr());
2865             m_jit.add64(op2.gpr(), result.gpr());
2866             int52Result(result.gpr(), node, op1.format());
2867             return;
2868         }
2869         
2870         SpeculateInt52Operand op1(this, node->child1());
2871         SpeculateInt52Operand op2(this, node->child2());
2872         GPRTemporary result(this);
2873         m_jit.move(op1.gpr(), result.gpr());
2874         speculationCheck(
2875             Int52Overflow, JSValueRegs(), 0,
2876             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2877         int52Result(result.gpr(), node);
2878         return;
2879     }
2880 #endif // USE(JSVALUE64)
2881     
2882     case DoubleRepUse: {
2883         SpeculateDoubleOperand op1(this, node->child1());
2884         SpeculateDoubleOperand op2(this, node->child2());
2885         FPRTemporary result(this, op1, op2);
2886
2887         FPRReg reg1 = op1.fpr();
2888         FPRReg reg2 = op2.fpr();
2889         m_jit.addDouble(reg1, reg2, result.fpr());
2890
2891         doubleResult(result.fpr(), node);
2892         return;
2893     }
2894         
2895     default:
2896         RELEASE_ASSERT_NOT_REACHED();
2897         break;
2898     }
2899 }
2900
2901 void SpeculativeJIT::compileMakeRope(Node* node)
2902 {
2903     ASSERT(node->child1().useKind() == KnownStringUse);
2904     ASSERT(node->child2().useKind() == KnownStringUse);
2905     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2906     
2907     SpeculateCellOperand op1(this, node->child1());
2908     SpeculateCellOperand op2(this, node->child2());
2909     SpeculateCellOperand op3(this, node->child3());
2910     GPRTemporary result(this);
2911     GPRTemporary allocator(this);
2912     GPRTemporary scratch(this);
2913     
2914     GPRReg opGPRs[3];
2915     unsigned numOpGPRs;
2916     opGPRs[0] = op1.gpr();
2917     opGPRs[1] = op2.gpr();
2918     if (node->child3()) {
2919         opGPRs[2] = op3.gpr();
2920         numOpGPRs = 3;
2921     } else {
2922         opGPRs[2] = InvalidGPRReg;
2923         numOpGPRs = 2;
2924     }
2925     GPRReg resultGPR = result.gpr();
2926     GPRReg allocatorGPR = allocator.gpr();
2927     GPRReg scratchGPR = scratch.gpr();
2928     
2929     JITCompiler::JumpList slowPath;
2930     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2931     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2932     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2933         
2934     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2935     for (unsigned i = 0; i < numOpGPRs; ++i)
2936         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2937     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2938         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2939     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2940     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2941     if (!ASSERT_DISABLED) {
2942         JITCompiler::Jump ok = m_jit.branch32(
2943             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2944         m_jit.abortWithReason(DFGNegativeStringLength);
2945         ok.link(&m_jit);
2946     }
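         // Merge the remaining fibers into the flags and length accumulated from the
         // first fiber: the flags are AND'ed so the rope is marked 8-bit only if every
         // fiber is 8-bit, and the lengths are summed with an overflow speculation check.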
2947     for (unsigned i = 1; i < numOpGPRs; ++i) {
2948         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2949         speculationCheck(
2950             Uncountable, JSValueSource(), nullptr,
2951             m_jit.branchAdd32(
2952                 JITCompiler::Overflow,
2953                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2954     }
2955     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2956     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2957     if (!ASSERT_DISABLED) {
2958         JITCompiler::Jump ok = m_jit.branch32(
2959             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2960         m_jit.abortWithReason(DFGNegativeStringLength);
2961         ok.link(&m_jit);
2962     }
2963     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2964     
2965     switch (numOpGPRs) {
2966     case 2:
2967         addSlowPathGenerator(slowPathCall(
2968             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2969         break;
2970     case 3:
2971         addSlowPathGenerator(slowPathCall(
2972             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2973         break;
2974     default:
2975         RELEASE_ASSERT_NOT_REACHED();
2976         break;
2977     }
2978         
2979     cellResult(resultGPR, node);
2980 }
2981
2982 void SpeculativeJIT::compileArithClz32(Node* node)
2983 {
2984     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2985     SpeculateInt32Operand value(this, node->child1());
2986     GPRTemporary result(this, Reuse, value);
2987     GPRReg valueReg = value.gpr();
2988     GPRReg resultReg = result.gpr();
2989     m_jit.countLeadingZeros32(valueReg, resultReg);
2990     int32Result(resultReg, node);
2991 }
2992
2993 void SpeculativeJIT::compileArithSub(Node* node)
2994 {
2995     switch (node->binaryUseKind()) {
2996     case Int32Use: {
2997         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2998         
2999         if (node->child2()->isInt32Constant()) {
3000             SpeculateInt32Operand op1(this, node->child1());
3001             int32_t imm2 = node->child2()->asInt32();
3002             GPRTemporary result(this);
3003
3004             if (!shouldCheckOverflow(node->arithMode())) {
3005                 m_jit.move(op1.gpr(), result.gpr());
3006                 m_jit.sub32(Imm32(imm2), result.gpr());
3007             } else {
3008                 GPRTemporary scratch(this);
3009                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3010             }
3011
3012             int32Result(result.gpr(), node);
3013             return;
3014         }
3015             
3016         if (node->child1()->isInt32Constant()) {
3017             int32_t imm1 = node->child1()->asInt32();
3018             SpeculateInt32Operand op2(this, node->child2());
3019             GPRTemporary result(this);
3020                 
3021             m_jit.move(Imm32(imm1), result.gpr());
3022             if (!shouldCheckOverflow(node->arithMode()))
3023                 m_jit.sub32(op2.gpr(), result.gpr());
3024             else
3025                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3026                 
3027             int32Result(result.gpr(), node);
3028             return;
3029         }
3030             
3031         SpeculateInt32Operand op1(this, node->child1());
3032         SpeculateInt32Operand op2(this, node->child2());
3033         GPRTemporary result(this);
3034
3035         if (!shouldCheckOverflow(node->arithMode())) {
3036             m_jit.move(op1.gpr(), result.gpr());
3037             m_jit.sub32(op2.gpr(), result.gpr());
3038         } else
3039             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3040
3041         int32Result(result.gpr(), node);
3042         return;
3043     }
3044         
3045 #if USE(JSVALUE64)
3046     case Int52RepUse: {
3047         ASSERT(shouldCheckOverflow(node->arithMode()));
3048         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3049
3050         // Will we need an overflow check? If we can prove that neither input can be
3051         // Int52 then the overflow check will not be necessary.
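             // (As with ArithAdd above: two int32-range values differ by at most 33 bits
             // of magnitude, which is comfortably within int52.)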
3052         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3053             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3054             SpeculateWhicheverInt52Operand op1(this, node->child1());
3055             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3056             GPRTemporary result(this, Reuse, op1);
3057             m_jit.move(op1.gpr(), result.gpr());
3058             m_jit.sub64(op2.gpr(), result.gpr());
3059             int52Result(result.gpr(), node, op1.format());
3060             return;
3061         }
3062         
3063         SpeculateInt52Operand op1(this, node->child1());
3064         SpeculateInt52Operand op2(this, node->child2());
3065         GPRTemporary result(this);
3066         m_jit.move(op1.gpr(), result.gpr());
3067         speculationCheck(
3068             Int52Overflow, JSValueRegs(), 0,
3069             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3070         int52Result(result.gpr(), node);
3071         return;
3072     }
3073 #endif // USE(JSVALUE64)
3074
3075     case DoubleRepUse: {
3076         SpeculateDoubleOperand op1(this, node->child1());
3077         SpeculateDoubleOperand op2(this, node->child2());
3078         FPRTemporary result(this, op1);
3079
3080         FPRReg reg1 = op1.fpr();
3081         FPRReg reg2 = op2.fpr();
3082         m_jit.subDouble(reg1, reg2, result.fpr());
3083
3084         doubleResult(result.fpr(), node);
3085         return;
3086     }
3087         
3088     default:
3089         RELEASE_ASSERT_NOT_REACHED();
3090         return;
3091     }
3092 }
3093
3094 void SpeculativeJIT::compileArithNegate(Node* node)
3095 {
3096     switch (node->child1().useKind()) {
3097     case Int32Use: {
3098         SpeculateInt32Operand op1(this, node->child1());
3099         GPRTemporary result(this);
3100
3101         m_jit.move(op1.gpr(), result.gpr());
3102
3103         // Note: there is no arith mode in which the result is not used as a number
3104         // but someone still cares about negative zero.
3105         
3106         if (!shouldCheckOverflow(node->arithMode()))
3107             m_jit.neg32(result.gpr());
3108         else if (!shouldCheckNegativeZero(node->arithMode()))
3109             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3110         else {
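                 // value & 0x7fffffff is zero only for 0 and INT32_MIN: negating 0 would
                 // produce negative zero and negating INT32_MIN would overflow, so exit
                 // for both before doing the negate.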
3111             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3112             m_jit.neg32(result.gpr());
3113         }
3114
3115         int32Result(result.gpr(), node);
3116         return;
3117     }
3118
3119 #if USE(JSVALUE64)
3120     case Int52RepUse: {
3121         ASSERT(shouldCheckOverflow(node->arithMode()));
3122         
3123         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3124             SpeculateWhicheverInt52Operand op1(this, node->child1());
3125             GPRTemporary result(this);
3126             GPRReg op1GPR = op1.gpr();
3127             GPRReg resultGPR = result.gpr();
3128             m_jit.move(op1GPR, resultGPR);
3129             m_jit.neg64(resultGPR);
3130             if (shouldCheckNegativeZero(node->arithMode())) {
3131                 speculationCheck(
3132                     NegativeZero, JSValueRegs(), 0,
3133                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3134             }
3135             int52Result(resultGPR, node, op1.format());
3136             return;
3137         }
3138         
3139         SpeculateInt52Operand op1(this, node->child1());
3140         GPRTemporary result(this);
3141         GPRReg op1GPR = op1.gpr();
3142         GPRReg resultGPR = result.gpr();
3143         m_jit.move(op1GPR, resultGPR);
3144         speculationCheck(
3145             Int52Overflow, JSValueRegs(), 0,
3146             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3147         if (shouldCheckNegativeZero(node->arithMode())) {
3148             speculationCheck(
3149                 NegativeZero, JSValueRegs(), 0,
3150                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3151         }
3152         int52Result(resultGPR, node);
3153         return;
3154     }
3155 #endif // USE(JSVALUE64)
3156         
3157     case DoubleRepUse: {
3158         SpeculateDoubleOperand op1(this, node->child1());
3159         FPRTemporary result(this);
3160         
3161         m_jit.negateDouble(op1.fpr(), result.fpr());
3162         
3163         doubleResult(result.fpr(), node);
3164         return;
3165     }
3166         
3167     default:
3168         RELEASE_ASSERT_NOT_REACHED();
3169         return;
3170     }
3171 }

3172 void SpeculativeJIT::compileArithMul(Node* node)
3173 {
3174     switch (node->binaryUseKind()) {
3175     case Int32Use: {
3176         SpeculateInt32Operand op1(this, node->child1());
3177         SpeculateInt32Operand op2(this, node->child2());
3178         GPRTemporary result(this);
3179
3180         GPRReg reg1 = op1.gpr();
3181         GPRReg reg2 = op2.gpr();
3182
3183         // We can perform truncated multiplications if we get to this point, because if the
3184         // fixup phase could not prove that it would be safe, it would have turned us into
3185         // a double multiplication.
3186         if (!shouldCheckOverflow(node->arithMode())) {
3187             m_jit.move(reg1, result.gpr());
3188             m_jit.mul32(reg2, result.gpr());
3189         } else {
3190             speculationCheck(
3191                 Overflow, JSValueRegs(), 0,
3192                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3193         }
3194             
3195         // Check for negative zero, if the users of this node care about such things.
3196         if (shouldCheckNegativeZero(node->arithMode())) {
3197             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3198             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3199             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3200             resultNonZero.link(&m_jit);
3201         }
3202
3203         int32Result(result.gpr(), node);
3204         return;
3205     }
3206     
3207 #if USE(JSVALUE64)   
3208     case Int52RepUse: {
3209         ASSERT(shouldCheckOverflow(node->arithMode()));
3210         
3211         // This is super clever. We want to do an int52 multiplication and check the
3212         // int52 overflow bit. There is no direct hardware support for this, but we do
3213         // have the ability to do an int64 multiplication and check the int64 overflow
3214         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3215         // registers, with the high 12 bits being sign-extended. We can do:
3216         //
3217         //     (a * (b << 12))
3218         //
3219         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3220         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3221         // multiplication overflows is identical to whether the 'a * b' 52-bit
3222         // multiplication overflows.
3223         //
3224         // In our nomenclature, this is:
3225         //
3226         //     strictInt52(a) * int52(b) => int52
3227         //
3228         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3229         // bits.
3230         //
3231         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3232         // we just do whatever is more convenient for op1 and have op2 do the
3233         // opposite. This ensures that we do at most one shift.
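             //
             // (Informal justification: shifting b left by 12 multiplies the product by
             // 2^12, and the signed 64-bit range is exactly 2^12 times the signed 52-bit
             // range, so the shifted product fits in int64 if and only if a * b fits in
             // int52. The hardware's int64 overflow flag therefore doubles as an int52
             // overflow flag for the unshifted product.)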
3234
3235         SpeculateWhicheverInt52Operand op1(this, node->child1());
3236         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3237         GPRTemporary result(this);
3238         
3239         GPRReg op1GPR = op1.gpr();
3240         GPRReg op2GPR = op2.gpr();
3241         GPRReg resultGPR = result.gpr();
3242         
3243         m_jit.move(op1GPR, resultGPR);
3244         speculationCheck(
3245             Int52Overflow, JSValueRegs(), 0,
3246             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3247         
3248         if (shouldCheckNegativeZero(node->arithMode())) {
3249             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3250                 MacroAssembler::NonZero, resultGPR);
3251             speculationCheck(
3252                 NegativeZero, JSValueRegs(), 0,
3253                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3254             speculationCheck(
3255                 NegativeZero, JSValueRegs(), 0,
3256                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3257             resultNonZero.link(&m_jit);
3258         }
3259         
3260         int52Result(resultGPR, node);
3261         return;
3262     }
3263 #endif // USE(JSVALUE64)
3264         
3265     case DoubleRepUse: {
3266         SpeculateDoubleOperand op1(this, node->child1());
3267         SpeculateDoubleOperand op2(this, node->child2());
3268         FPRTemporary result(this, op1, op2);
3269         
3270         FPRReg reg1 = op1.fpr();
3271         FPRReg reg2 = op2.fpr();
3272         
3273         m_jit.mulDouble(reg1, reg2, result.fpr());
3274         
3275         doubleResult(result.fpr(), node);
3276         return;
3277     }
3278         
3279     default:
3280         RELEASE_ASSERT_NOT_REACHED();
3281         return;
3282     }
3283 }
3284
3285 void SpeculativeJIT::compileArithDiv(Node* node)
3286 {
3287     switch (node->binaryUseKind()) {
3288     case Int32Use: {
3289 #if CPU(X86) || CPU(X86_64)
3290         SpeculateInt32Operand op1(this, node->child1());
3291         SpeculateInt32Operand op2(this, node->child2());
3292         GPRTemporary eax(this, X86Registers::eax);
3293         GPRTemporary edx(this, X86Registers::edx);
3294         GPRReg op1GPR = op1.gpr();
3295         GPRReg op2GPR = op2.gpr();
3296     
3297         GPRReg op2TempGPR;
3298         GPRReg temp;
3299         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3300             op2TempGPR = allocate();
3301             temp = op2TempGPR;
3302         } else {
3303             op2TempGPR = InvalidGPRReg;
3304             if (op1GPR == X86Registers::eax)
3305                 temp = X86Registers::edx;
3306             else
3307                 temp = X86Registers::eax;
3308         }
3309     
3310         ASSERT(temp != op1GPR);
3311         ASSERT(temp != op2GPR);
3312     
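             // Unsigned (op2 + 1) > 1 exactly when op2 is neither 0 nor -1, the only two
             // denominators that need special handling before idiv (division by zero and
             // the INT_MIN / -1 overflow case).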
3313         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3314     
3315         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3316     
3317         JITCompiler::JumpList done;
3318         if (shouldCheckOverflow(node->arithMode())) {
3319             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3320             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3321         } else {
3322             // This is the case where we convert the result to an int after we're done, and we
3323             // already know that the denominator is either -1 or 0. So, if the denominator is
3324             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3325             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3326             // are happy to fall through to a normal division, since we're just dividing
3327             // something by negative 1.
3328         
3329             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3330             m_jit.move(TrustedImm32(0), eax.gpr());
3331             done.append(m_jit.jump());
3332         
3333             notZero.link(&m_jit);
3334             JITCompiler::Jump notNeg2ToThe31 =
3335                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3336             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3337             done.append(m_jit.jump());
3338         
3339             notNeg2ToThe31.link(&m_jit);
3340         }
3341     
3342         safeDenominator.link(&m_jit);
3343     
3344         // If the user cares about negative zero, then speculate that we're not about
3345         // to produce negative zero.
3346         if (shouldCheckNegativeZero(node->arithMode())) {
3347             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3348             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3349             numeratorNonZero.link(&m_jit);
3350         }
3351     
3352         if (op2TempGPR != InvalidGPRReg) {
3353             m_jit.move(op2GPR, op2TempGPR);
3354             op2GPR = op2TempGPR;
3355         }
3356             
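             // cdq sign-extends eax into edx:eax; idivl then leaves the quotient in eax
             // and the remainder in edx (which the remainder check below relies on).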
3357         m_jit.move(op1GPR, eax.gpr());
3358         m_jit.assembler().cdq();
3359         m_jit.assembler().idivl_r(op2GPR);
3360             
3361         if (op2TempGPR != InvalidGPRReg)
3362             unlock(op2TempGPR);
3363
3364         // Check that there was no remainder. If there had been, then we'd be obligated to
3365         // produce a double result instead.
3366         if (shouldCheckOverflow(node->arithMode()))
3367             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3368         
3369         done.link(&m_jit);
3370         int32Result(eax.gpr(), node);
3371 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3372         SpeculateInt32Operand op1(this, node->child1());
3373         SpeculateInt32Operand op2(this, node->child2());
3374         GPRReg op1GPR = op1.gpr();
3375         GPRReg op2GPR = op2.gpr();
3376         GPRTemporary quotient(this);
3377         GPRTemporary multiplyAnswer(this);
3378
3379         // If the user cares about negative zero, then speculate that we're not about
3380         // to produce negative zero.
3381         if (shouldCheckNegativeZero(node->arithMode())) {
3382             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3383             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3384             numeratorNonZero.link(&m_jit);
3385         }
3386
3387         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3388
3389         // Check that there was no remainder. If there had been, then we'd be obligated to
3390         // produce a double result instead.
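             // (sdiv produces only the quotient, so multiply it back by the divisor and
             // compare against the dividend to detect a non-zero remainder.)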
3391         if (shouldCheckOverflow(node->arithMode())) {
3392             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3393             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3394         }
3395
3396         int32Result(quotient.gpr(), node);
3397 #else
3398         RELEASE_ASSERT_NOT_REACHED();
3399 #endif
3400         break;
3401     }
3402         
3403     case DoubleRepUse: {
3404         SpeculateDoubleOperand op1(this, node->child1());
3405         SpeculateDoubleOperand op2(this, node->child2());
3406         FPRTemporary result(this, op1);
3407         
3408         FPRReg reg1 = op1.fpr();
3409         FPRReg reg2 = op2.fpr();
3410         m_jit.divDouble(reg1, reg2, result.fpr());
3411         
3412         doubleResult(result.fpr(), node);
3413         break;
3414     }
3415         
3416     default:
3417         RELEASE_ASSERT_NOT_REACHED();
3418         break;
3419     }
3420 }
3421
3422 void SpeculativeJIT::compileArithMod(Node* node)
3423 {
3424     switch (node->binaryUseKind()) {
3425     case Int32Use: {
3426         // In the fast path, the dividend value could be the final result
3427         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3428         SpeculateStrictInt32Operand op1(this, node->child1());
3429         
3430         if (node->child2()->isInt32Constant()) {
3431             int32_t divisor = node->child2()->asInt32();
3432             if (divisor > 1 && hasOneBitSet(divisor)) {
3433                 unsigned logarithm = WTF::fastLog2(divisor);
3434                 GPRReg dividendGPR = op1.gpr();
3435                 GPRTemporary result(this);
3436                 GPRReg resultGPR = result.gpr();
3437
3438                 // This is what LLVM generates. It's pretty crazy. Here's my
3439                 // attempt at understanding it.
3440                 
3441                 // First, compute either divisor - 1, or 0, depending on whether
3442                 // the dividend is negative:
3443                 //
3444                 // If dividend < 0:  resultGPR = divisor - 1
3445                 // If dividend >= 0: resultGPR = 0
3446                 m_jit.move(dividendGPR, resultGPR);
3447                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3448                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3449                 
3450                 // Add in the dividend, so that:
3451                 //
3452                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3453                 // If dividend >= 0: resultGPR = dividend
3454                 m_jit.add32(dividendGPR, resultGPR);
3455                 
3456                 // Mask so as to only get the *high* bits. This rounds down
3457                 // (towards negative infinity) resultGPR to the nearest multiple
3458                 // of divisor, so that:
3459                 //
3460                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3461                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3462                 //
3463                 // Note that this can be simplified to:
3464                 //
3465                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3466                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3467                 //
3468                 // Note that if the dividend is negative, resultGPR will also be negative.
3469                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3470                 // zero, because of how things are conditionalized.
3471                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3472                 
3473                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3474                 //
3475                 // resultGPR = dividendGPR - resultGPR
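                     //
                     // Illustrative example (divisor = 4, so logarithm = 2):
                     //   dividend = -7: -7 >> 31 = -1, >>> 30 = 3; -7 + 3 = -4; -4 & -4 = -4;
                     //                  remainder = -7 - (-4) = -3, matching -7 % 4 in JS.
                     //   dividend = 7:  7 >> 31 = 0; 7 + 0 = 7; 7 & -4 = 4;
                     //                  remainder = 7 - 4 = 3.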
3476                 m_jit.neg32(resultGPR);
3477                 m_jit.add32(dividendGPR, resultGPR);
3478                 
3479                 if (shouldCheckNegativeZero(node->arithMode())) {
3480                     // Check that we're not about to create negative zero.
3481                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3482                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3483                     numeratorPositive.link(&m_jit);
3484                 }
3485
3486                 int32Result(resultGPR, node);
3487                 return;
3488             }
3489         }
3490         
3491 #if CPU(X86) || CPU(X86_64)
3492         if (node->child2()->isInt32Constant()) {
3493             int32_t divisor = node->child2()->asInt32();
3494             if (divisor && divisor != -1) {
3495                 GPRReg op1Gpr = op1.gpr();
3496
3497                 GPRTemporary eax(this, X86Registers::eax);
3498                 GPRTemporary edx(this, X86Registers::edx);
3499                 GPRTemporary scratch(this);
3500                 GPRReg scratchGPR = scratch.gpr();
3501
3502                 GPRReg op1SaveGPR;
3503                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3504                     op1SaveGPR = allocate();
3505                     ASSERT(op1Gpr != op1SaveGPR);
3506                     m_jit.move(op1Gpr, op1SaveGPR);
3507                 } else
3508                     op1SaveGPR = op1Gpr;
3509                 ASSERT(op1SaveGPR != X86Registers::eax);
3510                 ASSERT(op1SaveGPR != X86Registers::edx);
3511
3512                 m_jit.move(op1Gpr, eax.gpr());
3513                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3514                 m_jit.assembler().cdq();
3515                 m_jit.assembler().idivl_r(scratchGPR);
3516                 if (shouldCheckNegativeZero(node->arithMode())) {
3517                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3518                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3519                     numeratorPositive.link(&m_jit);
3520                 }
3521             
3522                 if (op1SaveGPR != op1Gpr)
3523                     unlock(op1SaveGPR);
3524
3525                 int32Result(edx.gpr(), node);
3526                 return;
3527             }
3528         }
3529 #endif
3530
3531         SpeculateInt32Operand op2(this, node->child2());
3532 #if CPU(X86) || CPU(X86_64)
3533         GPRTemporary eax(this, X86Registers::eax);
3534         GPRTemporary edx(this, X86Registers::edx);
3535         GPRReg op1GPR = op1.gpr();
3536         GPRReg op2GPR = op2.gpr();
3537     
3538         GPRReg op2TempGPR;
3539         GPRReg temp;
3540         GPRReg op1SaveGPR;
3541     
3542         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3543             op2TempGPR = allocate();
3544             temp = op2TempGPR;
3545         } else {
3546             op2TempGPR = InvalidGPRReg;
3547             if (op1GPR == X86Registers::eax)
3548                 temp = X86Registers::edx;
3549             else
3550                 temp = X86Registers::eax;
3551         }
3552     
3553         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3554             op1SaveGPR = allocate();
3555             ASSERT(op1GPR != op1SaveGPR);
3556             m_jit.move(op1GPR, op1SaveGPR);
3557         } else
3558             op1SaveGPR = op1GPR;
3559     
3560         ASSERT(temp != op1GPR);
3561         ASSERT(temp != op2GPR);
3562         ASSERT(op1SaveGPR != X86Registers::eax);
3563         ASSERT(op1SaveGPR != X86Registers::edx);
3564     
3565         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3566     
3567         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3568     
3569         JITCompiler::JumpList done;
3570         
3571         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3572         // separate case for that. But it probably doesn't matter so much.
3573         if (shouldCheckOverflow(node->arithMode())) {
3574             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3575             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3576         } else {
3577             // This is the case where we convert the result to an int after we're done, and we
3578             // already know that the denominator is either -1 or 0. So, if the denominator is
3579             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3580             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3581             // happy to fall through to a normal division, since we're just dividing something
3582             // by negative 1.
3583         
3584             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3585             m_jit.move(TrustedImm32(0), edx.gpr());
3586             done.append(m_jit.jump());
3587         
3588             notZero.link(&m_jit);
3589             JITCompiler::Jump notNeg2ToThe31 =
3590                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3591             m_jit.move(TrustedImm32(0), edx.gpr());
3592             done.append(m_jit.jump());
3593         
3594             notNeg2ToThe31.link(&m_jit);
3595         }
3596         
3597         safeDenominator.link(&m_jit);
3598             
3599         if (op2TempGPR != InvalidGPRReg) {
3600             m_jit.move(op2GPR, op2TempGPR);
3601             op2GPR = op2TempGPR;
3602         }
3603             
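             // As in ArithDiv above: cdq + idivl leave the quotient in eax and the
             // remainder in edx; here the remainder is the result we ultimately want.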
3604         m_jit.move(op1GPR, eax.gpr());
3605         m_jit.assembler().cdq();
3606         m_jit.assembler().idivl_r(op2GPR);
3607             
3608         if (op2TempGPR != InvalidGPRReg)
3609             unlock(op2TempGPR);
3610
3611         // Check that we're not about to create negative zero.
3612         if (shouldCheckNegativeZero(node->arithMode())) {
3613             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3614             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3615             numeratorPositive.link(&m_jit);
3616         }
3617     
3618         if (op1SaveGPR != op1GPR)
3619             unlock(op1SaveGPR);
3620             
3621         done.link(&m_jit);
3622         int32Result(edx.gpr(), node);
3623
3624 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3625         GPRTemporary temp(this);
3626         GPRTemporary quotientThenRemainder(this);
3627         GPRTemporary multiplyAnswer(this);
3628         GPRReg dividendGPR = op1.gpr();
3629         GPRReg divisorGPR = op2.gpr();
3630         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3631         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3632
3633         JITCompiler::JumpList done;
3634     
3635         if (shouldCheckOverflow(node->arithMode()))
3636             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3637         else {
3638             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3639             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3640             done.append(m_jit.jump());
3641             denominatorNotZero.link(&m_jit);
3642         }
3643
3644         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3645         // FIXME: It seems like there are cases where we don't need this? What if we have
3646         // arithMode() == Arith::Unchecked?
3647         // https://bugs.webkit.org/show_bug.cgi?id=126444
3648         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3649 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3650         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3651 #else
3652         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3653 #endif
3654
3655         // If the user cares about negative zero, then speculate that we're not about
3656         // to produce negative zero.
3657         if (shouldCheckNegativeZero(node->arithMode())) {
3658             // Check that we're not about to create negative zero.
3659             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3660             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3661             numeratorPositive.link(&m_jit);
3662         }
3663
3664         done.link(&m_jit);
3665
3666         int32Result(quotientThenRemainderGPR, node);
3667 #else // not architecture that can do integer division
3668         RELEASE_ASSERT_NOT_REACHED();
3669 #endif
3670         return;
3671     }
3672         
3673     case DoubleRepUse: {
3674         SpeculateDoubleOperand op1(this, node->child1());
3675         SpeculateDoubleOperand op2(this, node->child2());
3676         
3677         FPRReg op1FPR = op1.fpr();
3678         FPRReg op2FPR = op2.fpr();
3679         
3680         flushRegisters();
3681         
3682         FPRResult result(this);
3683         
3684         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3685         
3686         doubleResult(result.fpr(), node);
3687         return;
3688     }
3689         
3690     default:
3691         RELEASE_ASSERT_NOT_REACHED();
3692         return;
3693     }
3694 }
3695
3696 void SpeculativeJIT::compileArithRound(Node* node)
3697 {
3698     ASSERT(node->child1().useKind() == DoubleRepUse);
3699
3700     SpeculateDoubleOperand value(this, node->child1());
3701     FPRReg valueFPR = value.fpr();
3702
3703     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3704         FPRTemporary oneHalf(this);
3705         GPRTemporary roundedResultAsInt32(this);
3706         FPRReg oneHalfFPR = oneHalf.fpr();
3707         GPRReg resultGPR = roundedResultAsInt32.gpr();
3708
3709         static const double halfConstant = 0.5;
3710         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3711         m_jit.addDouble(valueFPR, oneHalfFPR);
3712
3713         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3714         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3715         int32Result(resultGPR, node);
3716         return;
3717     }
3718
3719     flushRegisters();
3720     FPRResult roundedResultAsDouble(this);
3721     FPRReg resultFPR = roundedResultAsDouble.fpr();
3722     callOperation(jsRound, resultFPR, valueFPR);
3723     if (producesInteger(node->arithRoundingMode())) {
3724         GPRTemporary roundedResultAsInt32(this);
3725         FPRTemporary scratch(this);
3726         FPRReg scratchFPR = scratch.fpr();
3727         GPRReg resultGPR = roundedResultAsInt32.gpr();
3728         JITCompiler::JumpList failureCases;
3729         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3730         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3731
3732         int32Result(resultGPR, node);
3733     } else
3734         doubleResult(resultFPR, node);
3735 }
3736
3737 void SpeculativeJIT::compileArithSqrt(Node* node)
3738 {
3739     SpeculateDoubleOperand op1(this, node->child1());
3740     FPRReg op1FPR = op1.fpr();
3741
3742     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3743         flushRegisters();
3744         FPRResult result(this);
3745         callOperation(sqrt, result.fpr(), op1FPR);
3746         doubleResult(result.fpr(), node);
3747     } else {
3748         FPRTemporary result(this, op1);
3749         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3750         doubleResult(result.fpr(), node);
3751     }
3752 }
3753
3754 // For small positive integer exponents, it is worth doing a tiny inline loop to exponentiate the base.
3755 // Every register is clobbered by this helper.
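     // Roughly: the loop below is square-and-multiply over the bits of the exponent.
     // For example, with yOperand = 5 (binary 101): the first iteration multiplies the
     // result by x and squares x; the second only squares x (that bit is 0); the third
     // multiplies the result by x^4, yielding x^5 after three iterations.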
3756 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3757 {
3758     MacroAssembler::JumpList skipFastPath;
3759     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3760     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3761
3762     static const double oneConstant = 1.0;
3763     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3764
3765     MacroAssembler::Label startLoop(assembler.label());
3766     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3767     assembler.mulDouble(xOperand, result);
3768     exponentIsEven.link(&assembler);
3769     assembler.mulDouble(xOperand, xOperand);
3770     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3771     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3772
3773     MacroAssembler::Jump skipSlowPath = assembler.jump();
3774     skipFastPath.link(&assembler);
3775
3776     return skipSlowPath;
3777 }
3778
3779 void SpeculativeJIT::compileArithPow(Node* node)
3780 {
3781     if (node->child2().useKind() == Int32Use) {
3782         SpeculateDoubleOperand xOperand(this, node->child1());
3783         SpeculateInt32Operand yOperand(this, node->child2());
3784         FPRReg xOperandfpr = xOperand.fpr();
3785         GPRReg yOperandGpr = yOperand.gpr();
3786         FPRTemporary yOperandfpr(this);
3787
3788         flushRegisters();
3789
3790         FPRResult result(this);
3791         FPRReg resultFpr = result.fpr();
3792
3793         FPRTemporary xOperandCopy(this);
3794         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3795         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);