/*
 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

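// Emits the inline fast path for allocating a JSArray: the butterfly is allocated with
// emitAllocateBasicStorage and the cell with emitAllocateJSObject<JSArray>, then the
// public length and vector length are stored into the indexing header. For double arrays
// the unused tail of the vector is pre-filled with PNaN, the representation of a hole.
// Any failure falls through to a slow-path call to operationNewArrayWithSize.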
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

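// OSR exit fuzzing support: each speculation check bumps a global counter and, depending
// on the fireOSRExitFuzzAt/fireOSRExitFuzzAtOrAfter options, may return a jump that forces
// the exit to be taken even though the speculation would have held. When fuzzing is
// disabled this returns an unset Jump.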
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

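// The speculationCheck() overloads register an OSR exit for the current node: the given
// jump(s) are linked to an exit that reconstructs the bytecode state (using the variable
// event stream recorded so far) and resumes execution in baseline code.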
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

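// An invalidation point is an OSR exit with no branch of its own: we record a watchpoint
// label that can later be overwritten with a jump to the exit if this code block is
// jettisoned, e.g. because a watchpoint it depends on fires.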
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::stubUnavailableRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic);
        m_slowPathGenerators[i]->generate(this);
    }
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

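// "Silent" spill/fill preserves live registers around a call without disturbing the
// recorded register allocation state. silentSavePlanForGPR/FPR compute, per register,
// how to save it (or that no store is needed) and how to restore it afterwards,
// possibly by rematerializing a constant instead of reloading from the stack.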
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

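// Given the indexing type byte of the base object in tempGPR, returns the jump(s) taken
// when the object does not match the shape and array class required by the ArrayMode.
// Note that tempGPR is clobbered by the masking/subtraction used for the tests.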
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

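// CheckArray: emit the cheapest check that the base cell really has the array mode the
// fixup phase speculated on. Plain JS array shapes are checked via the indexing type byte;
// DirectArguments, ScopedArguments and typed arrays are checked via the cell's JSType.
// On mismatch we OSR exit with BadIndexingType/BadType.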
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

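// Arrayify converts the base object's storage to the indexing shape the array mode wants.
// The fast path just verifies that the object already has the desired shape (or, for
// ArrayifyToStructure, the desired structure); otherwise ArrayifySlowPathGenerator calls
// out to perform the conversion.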
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

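// Compiles the 'in' operator. When the property name is a constant, atomic JSString we
// plant a patchable jump plus a StructureStubInfo so the check can be inline-cached
// (the slow path calls operationInOptimize); otherwise we flush registers and call the
// generic operationGenericIn.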
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

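// Debug dump of the register allocation state: the GPR and FPR banks, then each live
// virtual register printed as index:[register format][spill format]:register, using the
// tags produced by dataFormatString() above.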
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

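// The compilePeepHole*Branch() helpers implement fused compare-and-branch: when a compare
// node is immediately followed by the Branch that consumes it, we emit a single conditional
// jump instead of materializing a boolean. If the taken block is the fall-through block we
// invert the condition and swap the targets.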
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes that also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(SymbolUse))
                compilePeepHoleSymbolEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else if (!needsTypeCheck(node->child1(), SpecOther))
                nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
            else if (!needsTypeCheck(node->child2(), SpecOther))
                nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}

void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
}

void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}

1431 void SpeculativeJIT::compileCurrentBlock()
1432 {
1433     ASSERT(m_compileOkay);
1434     
1435     if (!m_block)
1436         return;
1437     
1438     ASSERT(m_block->isReachable);
1439     
1440     m_jit.blockHeads()[m_block->index] = m_jit.label();
1441
1442     if (!m_block->intersectionOfCFAHasVisited) {
1443         // Don't generate code for basic blocks that are unreachable according to CFA.
1444         // But to be sure that nobody has generated a jump to this block, drop in a
1445         // breakpoint here.
1446         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1447         return;
1448     }
1449
1450     m_stream->appendAndLog(VariableEvent::reset());
1451     
1452     m_jit.jitAssertHasValidCallFrame();
1453     m_jit.jitAssertTagsInPlace();
1454     m_jit.jitAssertArgumentCountSane();
1455
1456     m_state.reset();
1457     m_state.beginBasicBlock(m_block);
1458     
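         // Record, for OSR exit, where each variable live at the head of the block has been
         // flushed and in what format; dead SetLocals are skipped below.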
1459     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1460         int operand = m_block->variablesAtHead.operandForIndex(i);
1461         Node* node = m_block->variablesAtHead[i];
1462         if (!node)
1463             continue; // No need to record dead SetLocal's.
1464         
1465         VariableAccessData* variable = node->variableAccessData();
1466         DataFormat format;
1467         if (!node->refCount())
1468             continue; // No need to record dead SetLocal's.
1469         format = dataFormatFor(variable->flushFormat());
1470         m_stream->appendAndLog(
1471             VariableEvent::setLocal(
1472                 VirtualRegister(operand),
1473                 variable->machineLocal(),
1474                 format));
1475     }
1476
1477     m_origin = NodeOrigin();
1478     
1479     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1480         m_currentNode = m_block->at(m_indexInBlock);
1481         
1482         // We may have hit a contradiction that the CFA was aware of but that the JIT
1483         // didn't cause directly.
1484         if (!m_state.isValid()) {
1485             bail(DFGBailedAtTopOfBlock);
1486             return;
1487         }
1488
1489         m_interpreter.startExecuting();
1490         m_jit.setForNode(m_currentNode);
1491         m_origin = m_currentNode->origin;
1492         if (validationEnabled())
1493             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1494         m_lastGeneratedNode = m_currentNode->op();
1495         
1496         ASSERT(m_currentNode->shouldGenerate());
1497         
1498         if (verboseCompilationEnabled()) {
1499             dataLogF(
1500                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1501                 (int)m_currentNode->index(),
1502                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1503             dataLog("\n");
1504         }
1505
1506         if (Options::validateDFGExceptionHandling() && mayExit(m_jit.graph(), m_currentNode) != DoesNotExit)
1507             m_jit.jitReleaseAssertNoException();
1508
1509         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1510
1511         compile(m_currentNode);
1512         
1513         if (belongsInMinifiedGraph(m_currentNode->op()))
1514             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1515         
1516 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1517         m_jit.clearRegisterAllocationOffsets();
1518 #endif
1519         
1520         if (!m_compileOkay) {
1521             bail(DFGBailedAtEndOfNode);
1522             return;
1523         }
1524         
1525         // Make sure that the abstract state is rematerialized for the next node.
1526         m_interpreter.executeEffects(m_indexInBlock);
1527     }
1528     
1529     // Perform the most basic verification that children have been used correctly.
1530     if (!ASSERT_DISABLED) {
1531         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1532             GenerationInfo& info = m_generationInfo[index];
1533             RELEASE_ASSERT(!info.alive());
1534         }
1535     }
1536 }
1537
1538 // If we are making type predictions about our arguments then
1539 // we need to check that they are correct on function entry.
1540 void SpeculativeJIT::checkArgumentTypes()
1541 {
1542     ASSERT(!m_currentNode);
1543     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1544
1545     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1546         Node* node = m_jit.graph().m_arguments[i];
1547         if (!node) {
1548             // The argument is dead. We don't do any checks for such arguments.
1549             continue;
1550         }
1551         
1552         ASSERT(node->op() == SetArgument);
1553         ASSERT(node->shouldGenerate());
1554
1555         VariableAccessData* variableAccessData = node->variableAccessData();
1556         FlushFormat format = variableAccessData->flushFormat();
1557         
1558         if (format == FlushedJSValue)
1559             continue;
1560         
1561         VirtualRegister virtualRegister = variableAccessData->local();
1562
1563         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1564         
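             // On 64-bit we check the argument against JSC's NaN-boxed encoding directly: int32s sit
             // at or above the tag-type-number range, booleans are ValueFalse/ValueTrue (so xor'ing
             // with ValueFalse must leave only bit 0), and cells have none of the tag-mask bits set.
             // The 32-bit path below just compares the tag word instead.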
1565 #if USE(JSVALUE64)
1566         switch (format) {
1567         case FlushedInt32: {
1568             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1569             break;
1570         }
1571         case FlushedBoolean: {
1572             GPRTemporary temp(this);
1573             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1574             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1575             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1576             break;
1577         }
1578         case FlushedCell: {
1579             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1580             break;
1581         }
1582         default:
1583             RELEASE_ASSERT_NOT_REACHED();
1584             break;
1585         }
1586 #else
1587         switch (format) {
1588         case FlushedInt32: {
1589             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1590             break;
1591         }
1592         case FlushedBoolean: {
1593             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1594             break;
1595         }
1596         case FlushedCell: {
1597             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1598             break;
1599         }
1600         default:
1601             RELEASE_ASSERT_NOT_REACHED();
1602             break;
1603         }
1604 #endif
1605     }
1606
1607     m_origin = NodeOrigin();
1608 }
1609
1610 bool SpeculativeJIT::compile()
1611 {
1612     checkArgumentTypes();
1613     
1614     ASSERT(!m_currentNode);
1615     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1616         m_jit.setForBlockIndex(blockIndex);
1617         m_block = m_jit.graph().block(blockIndex);
1618         compileCurrentBlock();
1619     }
1620     linkBranches();
1621     return true;
1622 }
1623
1624 void SpeculativeJIT::createOSREntries()
1625 {
1626     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1627         BasicBlock* block = m_jit.graph().block(blockIndex);
1628         if (!block)
1629             continue;
1630         if (!block->isOSRTarget)
1631             continue;
1632         
1633         // Currently we don't have OSR entry trampolines. We could add them
1634         // here if need be.
1635         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1636     }
1637 }
1638
1639 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1640 {
1641     unsigned osrEntryIndex = 0;
1642     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1643         BasicBlock* block = m_jit.graph().block(blockIndex);
1644         if (!block)
1645             continue;
1646         if (!block->isOSRTarget)
1647             continue;
1648         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1649     }
1650     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1651     
1652     if (verboseCompilationEnabled()) {
1653         DumpContext dumpContext;
1654         dataLog("OSR Entries:\n");
1655         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1656             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1657         if (!dumpContext.isEmpty())
1658             dumpContext.dump(WTF::dataFile());
1659     }
1660 }
1661
1662 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1663 {
1664     Edge child3 = m_jit.graph().varArgChild(node, 2);
1665     Edge child4 = m_jit.graph().varArgChild(node, 3);
1666
1667     ArrayMode arrayMode = node->arrayMode();
1668     
1669     GPRReg baseReg = base.gpr();
1670     GPRReg propertyReg = property.gpr();
1671     
1672     SpeculateDoubleOperand value(this, child3);
1673
1674     FPRReg valueReg = value.fpr();
1675     
1676     DFG_TYPE_CHECK(
1677         JSValueRegs(), child3, SpecFullRealNumber,
1678         m_jit.branchDouble(
1679             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1680     
1681     if (!m_compileOkay)
1682         return;
1683     
1684     StorageOperand storage(this, child4);
1685     GPRReg storageReg = storage.gpr();
1686
1687     if (node->op() == PutByValAlias) {
1688         // Store the value to the array.
1689         GPRReg propertyReg = property.gpr();
1690         FPRReg valueReg = value.fpr();
1691         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1692         
1693         noResult(m_currentNode);
1694         return;
1695     }
1696     
1697     GPRTemporary temporary;
1698     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1699
1700     MacroAssembler::Jump slowCase;
1701     
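         // An in-bounds store only needs a bounds check against the public length. Otherwise the
         // store may grow the array: an index below the vector length bumps the public length, while
         // an index beyond the vector length either OSR exits (when the array mode does not allow
         // out-of-bounds stores) or falls back to the slow path call added below.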
1702     if (arrayMode.isInBounds()) {
1703         speculationCheck(
1704             OutOfBounds, JSValueRegs(), 0,
1705             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1706     } else {
1707         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1708         
1709         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1710         
1711         if (!arrayMode.isOutOfBounds())
1712             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1713         
1714         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1715         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1716         
1717         inBounds.link(&m_jit);
1718     }
1719     
1720     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1721
1722     base.use();
1723     property.use();
1724     value.use();
1725     storage.use();
1726     
1727     if (arrayMode.isOutOfBounds()) {
1728         addSlowPathGenerator(
1729             slowPathCall(
1730                 slowCase, this,
1731                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1732                 NoResult, baseReg, propertyReg, valueReg));
1733     }
1734
1735     noResult(m_currentNode, UseChildrenCalledExplicitly);
1736 }
1737
1738 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1739 {
1740     SpeculateCellOperand string(this, node->child1());
1741     SpeculateStrictInt32Operand index(this, node->child2());
1742     StorageOperand storage(this, node->child3());
1743
1744     GPRReg stringReg = string.gpr();
1745     GPRReg indexReg = index.gpr();
1746     GPRReg storageReg = storage.gpr();
1747     
1748     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1749
1750     // unsigned comparison so we can filter out negative indices and indices that are too large
1751     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1752
1753     GPRTemporary scratch(this);
1754     GPRReg scratchReg = scratch.gpr();
1755
1756     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1757
1758     // Load the character into scratchReg
1759     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1760
1761     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1762     JITCompiler::Jump cont8Bit = m_jit.jump();
1763
1764     is16Bit.link(&m_jit);
1765
1766     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1767
1768     cont8Bit.link(&m_jit);
1769
1770     int32Result(scratchReg, m_currentNode);
1771 }
1772
1773 void SpeculativeJIT::compileGetByValOnString(Node* node)
1774 {
1775     SpeculateCellOperand base(this, node->child1());
1776     SpeculateStrictInt32Operand property(this, node->child2());
1777     StorageOperand storage(this, node->child3());
1778     GPRReg baseReg = base.gpr();
1779     GPRReg propertyReg = property.gpr();
1780     GPRReg storageReg = storage.gpr();
1781
1782     GPRTemporary scratch(this);
1783     GPRReg scratchReg = scratch.gpr();
1784 #if USE(JSVALUE32_64)
1785     GPRTemporary resultTag;
1786     GPRReg resultTagReg = InvalidGPRReg;
1787     if (node->arrayMode().isOutOfBounds()) {
1788         GPRTemporary realResultTag(this);
1789         resultTag.adopt(realResultTag);
1790         resultTagReg = resultTag.gpr();
1791     }
1792 #endif
1793
1794     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1795
1796     // unsigned comparison so we can filter out negative indices and indices that are too large
1797     JITCompiler::Jump outOfBounds = m_jit.branch32(
1798         MacroAssembler::AboveOrEqual, propertyReg,
1799         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1800     if (node->arrayMode().isInBounds())
1801         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1802
1803     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1804
1805     // Load the character into scratchReg
1806     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1807
1808     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1809     JITCompiler::Jump cont8Bit = m_jit.jump();
1810
1811     is16Bit.link(&m_jit);
1812
1813     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1814
1815     JITCompiler::Jump bigCharacter =
1816         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1817
1818     // 8 bit string values don't need the isASCII check.
1819     cont8Bit.link(&m_jit);
1820
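         // Use the character code as an index into the VM's single-character string cache: scale by
         // pointer size (shift by 2 on 32-bit, 3 on 64-bit) and load the cached JSString*.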
1821     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1822     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1823     m_jit.loadPtr(scratchReg, scratchReg);
1824
1825     addSlowPathGenerator(
1826         slowPathCall(
1827             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1828
1829     if (node->arrayMode().isOutOfBounds()) {
1830 #if USE(JSVALUE32_64)
1831         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1832 #endif
1833
1834         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1835         if (globalObject->stringPrototypeChainIsSane()) {
1836             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1837             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1838             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1839             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1840             // indexed properties either.
1841             // https://bugs.webkit.org/show_bug.cgi?id=144668
1842             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1843             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1844             
1845 #if USE(JSVALUE64)
1846             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1847                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1848 #else
1849             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1850                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1851                 baseReg, propertyReg));
1852 #endif
1853         } else {
1854 #if USE(JSVALUE64)
1855             addSlowPathGenerator(
1856                 slowPathCall(
1857                     outOfBounds, this, operationGetByValStringInt,
1858                     scratchReg, baseReg, propertyReg));
1859 #else
1860             addSlowPathGenerator(
1861                 slowPathCall(
1862                     outOfBounds, this, operationGetByValStringInt,
1863                     resultTagReg, scratchReg, baseReg, propertyReg));
1864 #endif
1865         }
1866         
1867 #if USE(JSVALUE64)
1868         jsValueResult(scratchReg, m_currentNode);
1869 #else
1870         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1871 #endif
1872     } else
1873         cellResult(scratchReg, m_currentNode);
1874 }
1875
1876 void SpeculativeJIT::compileFromCharCode(Node* node)
1877 {
1878     Edge& child = node->child1();
1879     if (child.useKind() == UntypedUse) {
1880         JSValueOperand opr(this, child);
1881         JSValueRegs oprRegs = opr.jsValueRegs();
1882 #if USE(JSVALUE64)
1883         GPRTemporary result(this);
1884         JSValueRegs resultRegs = JSValueRegs(result.gpr());
1885 #else
1886         GPRTemporary resultTag(this);
1887         GPRTemporary resultPayload(this);
1888         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
1889 #endif
1890         flushRegisters();
1891         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
1892         m_jit.exceptionCheck();
1893         
1894         jsValueResult(resultRegs, node);
1895         return;
1896     }
1897
1898     SpeculateStrictInt32Operand property(this, child);
1899     GPRReg propertyReg = property.gpr();
1900     GPRTemporary smallStrings(this);
1901     GPRTemporary scratch(this);
1902     GPRReg scratchReg = scratch.gpr();
1903     GPRReg smallStringsReg = smallStrings.gpr();
1904
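         // Fast path: character codes below 0xff are served from the VM's single-character string
         // cache. Larger codes, or a cache entry that has not been materialized yet (null), go to
         // operationStringFromCharCode.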
1905     JITCompiler::JumpList slowCases;
1906     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1907     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1908     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1909
1910     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1911     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1912     cellResult(scratchReg, m_currentNode);
1913 }
1914
1915 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1916 {
1917     VirtualRegister virtualRegister = node->virtualRegister();
1918     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1919
1920     switch (info.registerFormat()) {
1921     case DataFormatStorage:
1922         RELEASE_ASSERT_NOT_REACHED();
1923
1924     case DataFormatBoolean:
1925     case DataFormatCell:
1926         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1927         return GeneratedOperandTypeUnknown;
1928
1929     case DataFormatNone:
1930     case DataFormatJSCell:
1931     case DataFormatJS:
1932     case DataFormatJSBoolean:
1933     case DataFormatJSDouble:
1934         return GeneratedOperandJSValue;
1935
1936     case DataFormatJSInt32:
1937     case DataFormatInt32:
1938         return GeneratedOperandInteger;
1939
1940     default:
1941         RELEASE_ASSERT_NOT_REACHED();
1942         return GeneratedOperandTypeUnknown;
1943     }
1944 }
1945
1946 void SpeculativeJIT::compileValueToInt32(Node* node)
1947 {
1948     switch (node->child1().useKind()) {
1949 #if USE(JSVALUE64)
1950     case Int52RepUse: {
1951         SpeculateStrictInt52Operand op1(this, node->child1());
1952         GPRTemporary result(this, Reuse, op1);
1953         GPRReg op1GPR = op1.gpr();
1954         GPRReg resultGPR = result.gpr();
1955         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1956         int32Result(resultGPR, node, DataFormatInt32);
1957         return;
1958     }
1959 #endif // USE(JSVALUE64)
1960         
1961     case DoubleRepUse: {
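             // Try a truncating hardware conversion first; if it fails (NaN or out of int32 range),
             // fall back to the toInt32 helper, which implements JS ToInt32 wrap-around semantics.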
1962         GPRTemporary result(this);
1963         SpeculateDoubleOperand op1(this, node->child1());
1964         FPRReg fpr = op1.fpr();
1965         GPRReg gpr = result.gpr();
1966         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1967         
1968         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1969         
1970         int32Result(gpr, node);
1971         return;
1972     }
1973     
1974     case NumberUse:
1975     case NotCellUse: {
1976         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1977         case GeneratedOperandInteger: {
1978             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1979             GPRTemporary result(this, Reuse, op1);
1980             m_jit.move(op1.gpr(), result.gpr());
1981             int32Result(result.gpr(), node, op1.format());
1982             return;
1983         }
1984         case GeneratedOperandJSValue: {
1985             GPRTemporary result(this);
1986 #if USE(JSVALUE64)
1987             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1988
1989             GPRReg gpr = op1.gpr();
1990             GPRReg resultGpr = result.gpr();
1991             FPRTemporary tempFpr(this);
1992             FPRReg fpr = tempFpr.fpr();
1993
1994             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1995             JITCompiler::JumpList converted;
1996
1997             if (node->child1().useKind() == NumberUse) {
1998                 DFG_TYPE_CHECK(
1999                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2000                     m_jit.branchTest64(
2001                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2002             } else {
2003                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2004                 
2005                 DFG_TYPE_CHECK(
2006                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2007                 
2008                 // It's not a cell: so true turns into 1 and all else turns into 0.
2009                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2010                 converted.append(m_jit.jump());
2011                 
2012                 isNumber.link(&m_jit);
2013             }
2014
2015             // If we get here, we have a double encoded as a JSValue; unbox it first.
2016             unboxDouble(gpr, resultGpr, fpr);
2017
2018             silentSpillAllRegisters(resultGpr);
2019             callOperation(toInt32, resultGpr, fpr);
2020             silentFillAllRegisters(resultGpr);
2021
2022             converted.append(m_jit.jump());
2023
2024             isInteger.link(&m_jit);
2025             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2026
2027             converted.link(&m_jit);
2028 #else
2029             Node* childNode = node->child1().node();
2030             VirtualRegister virtualRegister = childNode->virtualRegister();
2031             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2032
2033             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2034
2035             GPRReg payloadGPR = op1.payloadGPR();
2036             GPRReg resultGpr = result.gpr();
2037         
2038             JITCompiler::JumpList converted;
2039
2040             if (info.registerFormat() == DataFormatJSInt32)
2041                 m_jit.move(payloadGPR, resultGpr);
2042             else {
2043                 GPRReg tagGPR = op1.tagGPR();
2044                 FPRTemporary tempFpr(this);
2045                 FPRReg fpr = tempFpr.fpr();
2046                 FPRTemporary scratch(this);
2047
2048                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2049
2050                 if (node->child1().useKind() == NumberUse) {
2051                     DFG_TYPE_CHECK(
2052                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2053                         m_jit.branch32(
2054                             MacroAssembler::AboveOrEqual, tagGPR,
2055                             TrustedImm32(JSValue::LowestTag)));
2056                 } else {
2057                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2058                     
2059                     DFG_TYPE_CHECK(
2060                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2061                         m_jit.branchIfCell(op1.jsValueRegs()));
2062                     
2063                     // It's not a cell: so true turns into 1 and all else turns into 0.
2064                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2065                     m_jit.move(TrustedImm32(0), resultGpr);
2066                     converted.append(m_jit.jump());
2067                     
2068                     isBoolean.link(&m_jit);
2069                     m_jit.move(payloadGPR, resultGpr);
2070                     converted.append(m_jit.jump());
2071                     
2072                     isNumber.link(&m_jit);
2073                 }
2074
2075                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2076
2077                 silentSpillAllRegisters(resultGpr);
2078                 callOperation(toInt32, resultGpr, fpr);
2079                 silentFillAllRegisters(resultGpr);
2080
2081                 converted.append(m_jit.jump());
2082
2083                 isInteger.link(&m_jit);
2084                 m_jit.move(payloadGPR, resultGpr);
2085
2086                 converted.link(&m_jit);
2087             }
2088 #endif
2089             int32Result(resultGpr, node);
2090             return;
2091         }
2092         case GeneratedOperandTypeUnknown:
2093             RELEASE_ASSERT(!m_compileOkay);
2094             return;
2095         }
2096         RELEASE_ASSERT_NOT_REACHED();
2097         return;
2098     }
2099     
2100     default:
2101         ASSERT(!m_compileOkay);
2102         return;
2103     }
2104 }
2105
2106 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2107 {
2108     if (doesOverflow(node->arithMode())) {
2109         // We know that this sometimes produces doubles. So produce a double every
2110         // time. This at least allows subsequent code to not have weird conditionals.
2111             
2112         SpeculateInt32Operand op1(this, node->child1());
2113         FPRTemporary result(this);
2114             
2115         GPRReg inputGPR = op1.gpr();
2116         FPRReg outputFPR = result.fpr();
2117             
2118         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2119             
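             // convertInt32ToDouble treated the value as signed; if it was negative, add 2^32 to
             // recover the unsigned interpretation.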
2120         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2121         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2122         positive.link(&m_jit);
2123             
2124         doubleResult(outputFPR, node);
2125         return;
2126     }
2127     
2128     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2129
2130     SpeculateInt32Operand op1(this, node->child1());
2131     GPRTemporary result(this);
2132
2133     m_jit.move(op1.gpr(), result.gpr());
2134
2135     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2136
2137     int32Result(result.gpr(), node, op1.format());
2138 }
2139
2140 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2141 {
2142     SpeculateDoubleOperand op1(this, node->child1());
2143     FPRTemporary scratch(this);
2144     GPRTemporary result(this);
2145     
2146     FPRReg valueFPR = op1.fpr();
2147     FPRReg scratchFPR = scratch.fpr();
2148     GPRReg resultGPR = result.gpr();
2149
2150     JITCompiler::JumpList failureCases;
2151     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2152     m_jit.branchConvertDoubleToInt32(
2153         valueFPR, resultGPR, failureCases, scratchFPR,
2154         shouldCheckNegativeZero(node->arithMode()));
2155     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2156
2157     int32Result(resultGPR, node);
2158 }
2159
2160 void SpeculativeJIT::compileDoubleRep(Node* node)
2161 {
2162     switch (node->child1().useKind()) {
2163     case RealNumberUse: {
2164         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2165         FPRTemporary result(this);
2166         
2167         JSValueRegs op1Regs = op1.jsValueRegs();
2168         FPRReg resultFPR = result.fpr();
2169         
2170 #if USE(JSVALUE64)
2171         GPRTemporary temp(this);
2172         GPRReg tempGPR = temp.gpr();
2173         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2174 #else
2175         FPRTemporary temp(this);
2176         FPRReg tempFPR = temp.fpr();
2177         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2178 #endif
2179         
2180         JITCompiler::Jump done = m_jit.branchDouble(
2181             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2182         
2183         DFG_TYPE_CHECK(
2184             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2185         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2186         
2187         done.link(&m_jit);
2188         
2189         doubleResult(resultFPR, node);
2190         return;
2191     }
2192     
2193     case NotCellUse:
2194     case NumberUse: {
2195         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2196
2197         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2198         if (isInt32Speculation(possibleTypes)) {
2199             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2200             FPRTemporary result(this);
2201             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2202             doubleResult(result.fpr(), node);
2203             return;
2204         }
2205
2206         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2207         FPRTemporary result(this);
2208
2209 #if USE(JSVALUE64)
2210         GPRTemporary temp(this);
2211
2212         GPRReg op1GPR = op1.gpr();
2213         GPRReg tempGPR = temp.gpr();
2214         FPRReg resultFPR = result.fpr();
2215         JITCompiler::JumpList done;
2216
2217         JITCompiler::Jump isInteger = m_jit.branch64(
2218             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2219
2220         if (node->child1().useKind() == NotCellUse) {
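                 // NotCellUse lets us handle the non-number primitives inline, matching ToNumber:
                 // undefined becomes NaN, null and false become 0, and true becomes 1.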
2221             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2222             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2223
2224             static const double zero = 0;
2225             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2226
2227             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2228             done.append(isNull);
2229
2230             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2231                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2232
2233             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2234             static const double one = 1;
2235             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2236             done.append(m_jit.jump());
2237             done.append(isFalse);
2238
2239             isUndefined.link(&m_jit);
2240             static const double NaN = PNaN;
2241             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2242             done.append(m_jit.jump());
2243
2244             isNumber.link(&m_jit);
2245         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2246             typeCheck(
2247                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2248                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2249         }
2250
2251         unboxDouble(op1GPR, tempGPR, resultFPR);
2252         done.append(m_jit.jump());
2253     
2254         isInteger.link(&m_jit);
2255         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2256         done.link(&m_jit);
2257 #else // USE(JSVALUE64) -> this is the 32_64 case
2258         FPRTemporary temp(this);
2259     
2260         GPRReg op1TagGPR = op1.tagGPR();
2261         GPRReg op1PayloadGPR = op1.payloadGPR();
2262         FPRReg tempFPR = temp.fpr();
2263         FPRReg resultFPR = result.fpr();
2264         JITCompiler::JumpList done;
2265     
2266         JITCompiler::Jump isInteger = m_jit.branch32(
2267             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2268
2269         if (node->child1().useKind() == NotCellUse) {
2270             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2271             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2272
2273             static const double zero = 0;
2274             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2275
2276             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2277             done.append(isNull);
2278
2279             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2280
2281             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2282             static const double one = 1;
2283             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2284             done.append(m_jit.jump());
2285             done.append(isFalse);
2286
2287             isUndefined.link(&m_jit);
2288             static const double NaN = PNaN;
2289             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2290             done.append(m_jit.jump());
2291
2292             isNumber.link(&m_jit);
2293         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2294             typeCheck(
2295                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2296                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2297         }
2298
2299         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2300         done.append(m_jit.jump());
2301     
2302         isInteger.link(&m_jit);
2303         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2304         done.link(&m_jit);
2305 #endif // USE(JSVALUE64)
2306     
2307         doubleResult(resultFPR, node);
2308         return;
2309     }
2310         
2311 #if USE(JSVALUE64)
2312     case Int52RepUse: {
2313         SpeculateStrictInt52Operand value(this, node->child1());
2314         FPRTemporary result(this);
2315         
2316         GPRReg valueGPR = value.gpr();
2317         FPRReg resultFPR = result.fpr();
2318
2319         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2320         
2321         doubleResult(resultFPR, node);
2322         return;
2323     }
2324 #endif // USE(JSVALUE64)
2325         
2326     default:
2327         RELEASE_ASSERT_NOT_REACHED();
2328         return;
2329     }
2330 }
2331
2332 void SpeculativeJIT::compileValueRep(Node* node)
2333 {
2334     switch (node->child1().useKind()) {
2335     case DoubleRepUse: {
2336         SpeculateDoubleOperand value(this, node->child1());
2337         JSValueRegsTemporary result(this);
2338         
2339         FPRReg valueFPR = value.fpr();
2340         JSValueRegs resultRegs = result.regs();
2341         
2342         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2343         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2344         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2345         // local was purified.
2346         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2347             m_jit.purifyNaN(valueFPR);
2348
2349         boxDouble(valueFPR, resultRegs);
2350         
2351         jsValueResult(resultRegs, node);
2352         return;
2353     }
2354         
2355 #if USE(JSVALUE64)
2356     case Int52RepUse: {
2357         SpeculateStrictInt52Operand value(this, node->child1());
2358         GPRTemporary result(this);
2359         
2360         GPRReg valueGPR = value.gpr();
2361         GPRReg resultGPR = result.gpr();
2362         
2363         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2364         
2365         jsValueResult(resultGPR, node);
2366         return;
2367     }
2368 #endif // USE(JSVALUE64)
2369         
2370     default:
2371         RELEASE_ASSERT_NOT_REACHED();
2372         return;
2373     }
2374 }
2375
2376 static double clampDoubleToByte(double d)
2377 {
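         // Round half-up and clamp to the byte range [0, 255]; NaN fails the (d > 0) test and so
         // clamps to 0.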
2378     d += 0.5;
2379     if (!(d > 0))
2380         d = 0;
2381     else if (d > 255)
2382         d = 255;
2383     return d;
2384 }
2385
2386 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2387 {
2388     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2389     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2390     jit.xorPtr(result, result);
2391     MacroAssembler::Jump clamped = jit.jump();
2392     tooBig.link(&jit);
2393     jit.move(JITCompiler::TrustedImm32(255), result);
2394     clamped.link(&jit);
2395     inBounds.link(&jit);
2396 }
2397
2398 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2399 {
2400     // Unordered compare so we pick up NaN
2401     static const double zero = 0;
2402     static const double byteMax = 255;
2403     static const double half = 0.5;
2404     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2405     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2406     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2407     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2408     
2409     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2410     // FIXME: This should probably just use a floating point round!
2411     // https://bugs.webkit.org/show_bug.cgi?id=72054
2412     jit.addDouble(source, scratch);
2413     jit.truncateDoubleToInt32(scratch, result);   
2414     MacroAssembler::Jump truncatedInt = jit.jump();
2415     
2416     tooSmall.link(&jit);
2417     jit.xorPtr(result, result);
2418     MacroAssembler::Jump zeroed = jit.jump();
2419     
2420     tooBig.link(&jit);
2421     jit.move(JITCompiler::TrustedImm32(255), result);
2422     
2423     truncatedInt.link(&jit);
2424     zeroed.link(&jit);
2425
2426 }
2427
2428 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2429 {
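         // Returns an unset Jump when the access is provably in bounds: PutByValAlias has already
         // been checked, and a constant index into a view whose length the graph can fold needs no
         // check. Otherwise the returned jump branches when the index is out of bounds.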
2430     if (node->op() == PutByValAlias)
2431         return JITCompiler::Jump();
2432     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2433         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2434     if (view) {
2435         uint32_t length = view->length();
2436         Node* indexNode = m_jit.graph().child(node, 1).node();
2437         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2438             return JITCompiler::Jump();
2439         return m_jit.branch32(
2440             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2441     }
2442     return m_jit.branch32(
2443         MacroAssembler::AboveOrEqual, indexGPR,
2444         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2445 }
2446
2447 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2448 {
2449     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2450     if (!jump.isSet())
2451         return;
2452     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2453 }
2454
2455 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2456 {
2457     ASSERT(isInt(type));
2458     
2459     SpeculateCellOperand base(this, node->child1());
2460     SpeculateStrictInt32Operand property(this, node->child2());
2461     StorageOperand storage(this, node->child3());
2462
2463     GPRReg baseReg = base.gpr();
2464     GPRReg propertyReg = property.gpr();
2465     GPRReg storageReg = storage.gpr();
2466
2467     GPRTemporary result(this);
2468     GPRReg resultReg = result.gpr();
2469
2470     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2471
2472     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2473     switch (elementSize(type)) {
2474     case 1:
2475         if (isSigned(type))
2476             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2477         else
2478             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2479         break;
2480     case 2:
2481         if (isSigned(type))
2482             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2483         else
2484             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2485         break;
2486     case 4:
2487         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2488         break;
2489     default:
2490         CRASH();
2491     }
2492     if (elementSize(type) < 4 || isSigned(type)) {
2493         int32Result(resultReg, node);
2494         return;
2495     }
2496     
2497     ASSERT(elementSize(type) == 4 && !isSigned(type));
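         // A uint32 load may not fit in an int32. Prefer int32 plus a non-negative speculation, then
         // Int52 on 64-bit, and otherwise produce a double, adding 2^32 when the sign bit is set.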
2498     if (node->shouldSpeculateInt32()) {
2499         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2500         int32Result(resultReg, node);
2501         return;
2502     }
2503     
2504 #if USE(JSVALUE64)
2505     if (node->shouldSpeculateMachineInt()) {
2506         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2507         strictInt52Result(resultReg, node);
2508         return;
2509     }
2510 #endif
2511     
2512     FPRTemporary fresult(this);
2513     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2514     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2515     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2516     positive.link(&m_jit);
2517     doubleResult(fresult.fpr(), node);
2518 }
2519
2520 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2521 {
2522     ASSERT(isInt(type));
2523     
2524     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2525     GPRReg storageReg = storage.gpr();
2526     
2527     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2528     
2529     GPRTemporary value;
2530     GPRReg valueGPR = InvalidGPRReg;
2531     
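         // A constant value is converted to an int32 at compile time (clamped for byte-sized clamped
         // arrays). A non-number constant can never be stored into an int typed array along this
         // path, so speculation terminates.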
2532     if (valueUse->isConstant()) {
2533         JSValue jsValue = valueUse->asJSValue();
2534         if (!jsValue.isNumber()) {
2535             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2536             noResult(node);
2537             return;
2538         }
2539         double d = jsValue.asNumber();
2540         if (isClamped(type)) {
2541             ASSERT(elementSize(type) == 1);
2542             d = clampDoubleToByte(d);
2543         }
2544         GPRTemporary scratch(this);
2545         GPRReg scratchReg = scratch.gpr();
2546         m_jit.move(Imm32(toInt32(d)), scratchReg);
2547         value.adopt(scratch);
2548         valueGPR = scratchReg;
2549     } else {
2550         switch (valueUse.useKind()) {
2551         case Int32Use: {
2552             SpeculateInt32Operand valueOp(this, valueUse);
2553             GPRTemporary scratch(this);
2554             GPRReg scratchReg = scratch.gpr();
2555             m_jit.move(valueOp.gpr(), scratchReg);
2556             if (isClamped(type)) {
2557                 ASSERT(elementSize(type) == 1);
2558                 compileClampIntegerToByte(m_jit, scratchReg);
2559             }
2560             value.adopt(scratch);
2561             valueGPR = scratchReg;
2562             break;
2563         }
2564             
2565 #if USE(JSVALUE64)
2566         case Int52RepUse: {
2567             SpeculateStrictInt52Operand valueOp(this, valueUse);
2568             GPRTemporary scratch(this);
2569             GPRReg scratchReg = scratch.gpr();
2570             m_jit.move(valueOp.gpr(), scratchReg);
2571             if (isClamped(type)) {
2572                 ASSERT(elementSize(type) == 1);
2573                 MacroAssembler::Jump inBounds = m_jit.branch64(
2574                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2575                 MacroAssembler::Jump tooBig = m_jit.branch64(
2576                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2577                 m_jit.move(TrustedImm32(0), scratchReg);
2578                 MacroAssembler::Jump clamped = m_jit.jump();
2579                 tooBig.link(&m_jit);
2580                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2581                 clamped.link(&m_jit);
2582                 inBounds.link(&m_jit);
2583             }
2584             value.adopt(scratch);
2585             valueGPR = scratchReg;
2586             break;
2587         }
2588 #endif // USE(JSVALUE64)
2589             
2590         case DoubleRepUse: {
2591             if (isClamped(type)) {
2592                 ASSERT(elementSize(type) == 1);
2593                 SpeculateDoubleOperand valueOp(this, valueUse);
2594                 GPRTemporary result(this);
2595                 FPRTemporary floatScratch(this);
2596                 FPRReg fpr = valueOp.fpr();
2597                 GPRReg gpr = result.gpr();
2598                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2599                 value.adopt(result);
2600                 valueGPR = gpr;
2601             } else {
2602                 SpeculateDoubleOperand valueOp(this, valueUse);
2603                 GPRTemporary result(this);
2604                 FPRReg fpr = valueOp.fpr();
2605                 GPRReg gpr = result.gpr();
2606                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2607                 m_jit.xorPtr(gpr, gpr);
2608                 MacroAssembler::Jump fixed = m_jit.jump();
2609                 notNaN.link(&m_jit);
2610                 
2611                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2612                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2613                 
2614                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2615                 
2616                 fixed.link(&m_jit);
2617                 value.adopt(result);
2618                 valueGPR = gpr;
2619             }
2620             break;
2621         }
2622             
2623         default:
2624             RELEASE_ASSERT_NOT_REACHED();
2625             break;
2626         }
2627     }
2628     
2629     ASSERT_UNUSED(valueGPR, valueGPR != property);
2630     ASSERT(valueGPR != base);
2631     ASSERT(valueGPR != storageReg);
2632     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2633     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2634         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2635         outOfBounds = MacroAssembler::Jump();
2636     }
2637
2638     switch (elementSize(type)) {
2639     case 1:
2640         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2641         break;
2642     case 2:
2643         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2644         break;
2645     case 4:
2646         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2647         break;
2648     default:
2649         CRASH();
2650     }
2651     if (outOfBounds.isSet())
2652         outOfBounds.link(&m_jit);
2653     noResult(node);
2654 }
2655
2656 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2657 {
2658     ASSERT(isFloat(type));
2659     
2660     SpeculateCellOperand base(this, node->child1());
2661     SpeculateStrictInt32Operand property(this, node->child2());
2662     StorageOperand storage(this, node->child3());
2663
2664     GPRReg baseReg = base.gpr();
2665     GPRReg propertyReg = property.gpr();
2666     GPRReg storageReg = storage.gpr();
2667
2668     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2669
2670     FPRTemporary result(this);
2671     FPRReg resultReg = result.fpr();
2672     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2673     switch (elementSize(type)) {
2674     case 4:
2675         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2676         m_jit.convertFloatToDouble(resultReg, resultReg);
2677         break;
2678     case 8: {
2679         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2680         break;
2681     }
2682     default:
2683         RELEASE_ASSERT_NOT_REACHED();
2684     }
2685     
2686     doubleResult(resultReg, node);
2687 }
2688
2689 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2690 {
2691     ASSERT(isFloat(type));
2692     
2693     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2694     GPRReg storageReg = storage.gpr();
2695     
2696     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2697     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2698
2699     SpeculateDoubleOperand valueOp(this, valueUse);
2700     FPRTemporary scratch(this);
2701     FPRReg valueFPR = valueOp.fpr();
2702     FPRReg scratchFPR = scratch.fpr();
2703
2704     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2705     
2706     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2707     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2708         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2709         outOfBounds = MacroAssembler::Jump();
2710     }
2711     
2712     switch (elementSize(type)) {
2713     case 4: {
2714         m_jit.moveDouble(valueFPR, scratchFPR);
2715         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2716         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2717         break;
2718     }
2719     case 8:
2720         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2721         break;
2722     default:
2723         RELEASE_ASSERT_NOT_REACHED();
2724     }
2725     if (outOfBounds.isSet())
2726         outOfBounds.link(&m_jit);
2727     noResult(node);
2728 }
2729
2730 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2731 {
2732     // Check that prototype is an object.
2733     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2734     
2735     // Initialize scratchReg with the value being checked.
2736     m_jit.move(valueReg, scratchReg);
2737     
2738     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2739     MacroAssembler::Label loop(&m_jit);
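         // A proxy's prototype access is observable, so the inline walk stops as soon as one is
         // found and defers to operationDefaultHasInstance.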
2740     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2741         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2742     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2743     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2744     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2745 #if USE(JSVALUE64)
2746     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2747 #else
2748     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2749 #endif
2750     
2751     // No match - result is false.
2752 #if USE(JSVALUE64)
2753     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2754 #else
2755     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2756 #endif
2757     MacroAssembler::JumpList doneJumps; 
2758     doneJumps.append(m_jit.jump());
2759
2760     performDefaultHasInstance.link(&m_jit);
2761     silentSpillAllRegisters(scratchReg);
2762     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2763     silentFillAllRegisters(scratchReg);
2764     m_jit.exceptionCheck();
2765 #if USE(JSVALUE64)
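         // The slow-path operation returns an unboxed 0 or 1; OR in ValueFalse to turn it into
         // the JSValue encoding of false/true expected by the caller's blessedBooleanResult().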
2766     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2767 #endif
2768     doneJumps.append(m_jit.jump());
2769     
2770     isInstance.link(&m_jit);
2771 #if USE(JSVALUE64)
2772     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2773 #else
2774     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2775 #endif
2776     
2777     doneJumps.link(&m_jit);
2778 }
2779
2780 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2781 {
2782     SpeculateCellOperand base(this, node->child1());
2783
2784     GPRReg baseGPR = base.gpr();
2785
2786     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2787
2788     noResult(node);
2789 }
2790
2791 void SpeculativeJIT::compileInstanceOf(Node* node)
2792 {
2793     if (node->child1().useKind() == UntypedUse) {
2794         // It might not be a cell. Speculate less aggressively.
2795         // Or: it might only be used once (i.e. by us), so we get zero benefit
2796         // from speculating any more aggressively than we absolutely need to.
2797         
2798         JSValueOperand value(this, node->child1());
2799         SpeculateCellOperand prototype(this, node->child2());
2800         GPRTemporary scratch(this);
2801         GPRTemporary scratch2(this);
2802         
2803         GPRReg prototypeReg = prototype.gpr();
2804         GPRReg scratchReg = scratch.gpr();
2805         GPRReg scratch2Reg = scratch2.gpr();
2806         
2807         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2808         GPRReg valueReg = value.jsValueRegs().payloadGPR();
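             // A non-cell left-hand side can never be an instance of anything, so the result
             // is simply false.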
2809         moveFalseTo(scratchReg);
2810
2811         MacroAssembler::Jump done = m_jit.jump();
2812         
2813         isCell.link(&m_jit);
2814         
2815         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2816         
2817         done.link(&m_jit);
2818
2819         blessedBooleanResult(scratchReg, node);
2820         return;
2821     }
2822     
2823     SpeculateCellOperand value(this, node->child1());
2824     SpeculateCellOperand prototype(this, node->child2());
2825     
2826     GPRTemporary scratch(this);
2827     GPRTemporary scratch2(this);
2828     
2829     GPRReg valueReg = value.gpr();
2830     GPRReg prototypeReg = prototype.gpr();
2831     GPRReg scratchReg = scratch.gpr();
2832     GPRReg scratch2Reg = scratch2.gpr();
2833     
2834     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2835
2836     blessedBooleanResult(scratchReg, node);
2837 }
2838
2839 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2840 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2841 {
2842     Edge& leftChild = node->child1();
2843     Edge& rightChild = node->child2();
2844
2845     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2846         JSValueOperand left(this, leftChild);
2847         JSValueOperand right(this, rightChild);
2848         JSValueRegs leftRegs = left.jsValueRegs();
2849         JSValueRegs rightRegs = right.jsValueRegs();
2850 #if USE(JSVALUE64)
2851         GPRTemporary result(this);
2852         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2853 #else
2854         GPRTemporary resultTag(this);
2855         GPRTemporary resultPayload(this);
2856         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2857 #endif
2858         flushRegisters();
2859         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2860         m_jit.exceptionCheck();
2861
2862         jsValueResult(resultRegs, node);
2863         return;
2864     }
2865
2866     Optional<JSValueOperand> left;
2867     Optional<JSValueOperand> right;
2868
2869     JSValueRegs leftRegs;
2870     JSValueRegs rightRegs;
2871
2872 #if USE(JSVALUE64)
2873     GPRTemporary result(this);
2874     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2875     GPRTemporary scratch(this);
2876     GPRReg scratchGPR = scratch.gpr();
2877 #else
2878     GPRTemporary resultTag(this);
2879     GPRTemporary resultPayload(this);
2880     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2881     GPRReg scratchGPR = resultTag.gpr();
2882 #endif
2883
2884     SnippetOperand leftOperand;
2885     SnippetOperand rightOperand;
2886
2887     // The snippet generator does not support both operands being constant. If the left
2888     // operand is already const, we'll ignore the right operand's constness.
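         // For example, for (2 | x) only the left operand is marked const and for (x | 2) only
         // the right; if both children happened to be constants, only the left would be marked,
         // so the assertion below still holds.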
2889     if (leftChild->isInt32Constant())
2890         leftOperand.setConstInt32(leftChild->asInt32());
2891     else if (rightChild->isInt32Constant())
2892         rightOperand.setConstInt32(rightChild->asInt32());
2893
2894     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2895
2896     if (!leftOperand.isConst()) {
2897         left = JSValueOperand(this, leftChild);
2898         leftRegs = left->jsValueRegs();
2899     }
2900     if (!rightOperand.isConst()) {
2901         right = JSValueOperand(this, rightChild);
2902         rightRegs = right->jsValueRegs();
2903     }
2904
2905     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
2906     gen.generateFastPath(m_jit);
2907
2908     ASSERT(gen.didEmitFastPath());
2909     gen.endJumpList().append(m_jit.jump());
2910
2911     gen.slowPathJumpList().link(&m_jit);
2912     silentSpillAllRegisters(resultRegs);
2913
2914     if (leftOperand.isConst()) {
2915         leftRegs = resultRegs;
2916         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
2917     } else if (rightOperand.isConst()) {
2918         rightRegs = resultRegs;
2919         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
2920     }
2921
2922     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2923
2924     silentFillAllRegisters(resultRegs);
2925     m_jit.exceptionCheck();
2926
2927     gen.endJumpList().link(&m_jit);
2928     jsValueResult(resultRegs, node);
2929 }
2930
2931 void SpeculativeJIT::compileBitwiseOp(Node* node)
2932 {
2933     NodeType op = node->op();
2934     Edge& leftChild = node->child1();
2935     Edge& rightChild = node->child2();
2936
2937     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
2938         switch (op) {
2939         case BitAnd:
2940             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
2941             return;
2942         case BitOr:
2943             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
2944             return;
2945         case BitXor:
2946             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
2947             return;
2948         default:
2949             RELEASE_ASSERT_NOT_REACHED();
2950         }
2951     }
2952
2953     if (leftChild->isInt32Constant()) {
2954         SpeculateInt32Operand op2(this, rightChild);
2955         GPRTemporary result(this, Reuse, op2);
2956
2957         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
2958
2959         int32Result(result.gpr(), node);
2960
2961     } else if (rightChild->isInt32Constant()) {
2962         SpeculateInt32Operand op1(this, leftChild);
2963         GPRTemporary result(this, Reuse, op1);
2964
2965         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
2966
2967         int32Result(result.gpr(), node);
2968
2969     } else {
2970         SpeculateInt32Operand op1(this, leftChild);
2971         SpeculateInt32Operand op2(this, rightChild);
2972         GPRTemporary result(this, Reuse, op1, op2);
2973         
2974         GPRReg reg1 = op1.gpr();
2975         GPRReg reg2 = op2.gpr();
2976         bitOp(op, reg1, reg2, result.gpr());
2977         
2978         int32Result(result.gpr(), node);
2979     }
2980 }
2981
2982 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
2983 {
2984     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
2985         ? operationValueBitRShift : operationValueBitURShift;
2986     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
2987         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
2988
2989     Edge& leftChild = node->child1();
2990     Edge& rightChild = node->child2();
2991
2992     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2993         JSValueOperand left(this, leftChild);
2994         JSValueOperand right(this, rightChild);
2995         JSValueRegs leftRegs = left.jsValueRegs();
2996         JSValueRegs rightRegs = right.jsValueRegs();
2997 #if USE(JSVALUE64)
2998         GPRTemporary result(this);
2999         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3000 #else
3001         GPRTemporary resultTag(this);
3002         GPRTemporary resultPayload(this);
3003         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3004 #endif
3005         flushRegisters();
3006         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3007         m_jit.exceptionCheck();
3008
3009         jsValueResult(resultRegs, node);
3010         return;
3011     }
3012
3013     Optional<JSValueOperand> left;
3014     Optional<JSValueOperand> right;
3015
3016     JSValueRegs leftRegs;
3017     JSValueRegs rightRegs;
3018
3019     FPRTemporary leftNumber(this);
3020     FPRReg leftFPR = leftNumber.fpr();
3021
3022 #if USE(JSVALUE64)
3023     GPRTemporary result(this);
3024     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3025     GPRTemporary scratch(this);
3026     GPRReg scratchGPR = scratch.gpr();
3027     FPRReg scratchFPR = InvalidFPRReg;
3028 #else
3029     GPRTemporary resultTag(this);
3030     GPRTemporary resultPayload(this);
3031     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3032     GPRReg scratchGPR = resultTag.gpr();
3033     FPRTemporary fprScratch(this);
3034     FPRReg scratchFPR = fprScratch.fpr();
3035 #endif
3036
3037     SnippetOperand leftOperand;
3038     SnippetOperand rightOperand;
3039
3040     // The snippet generator does not support both operands being constant. If the left
3041     // operand is already const, we'll ignore the right operand's constness.
3042     if (leftChild->isInt32Constant())
3043         leftOperand.setConstInt32(leftChild->asInt32());
3044     else if (rightChild->isInt32Constant())
3045         rightOperand.setConstInt32(rightChild->asInt32());
3046
3047     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3048
3049     if (!leftOperand.isConst()) {
3050         left = JSValueOperand(this, leftChild);
3051         leftRegs = left->jsValueRegs();
3052     }
3053     if (!rightOperand.isConst()) {
3054         right = JSValueOperand(this, rightChild);
3055         rightRegs = right->jsValueRegs();
3056     }
3057
3058     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3059         leftFPR, scratchGPR, scratchFPR, shiftType);
3060     gen.generateFastPath(m_jit);
3061
3062     ASSERT(gen.didEmitFastPath());
3063     gen.endJumpList().append(m_jit.jump());
3064
3065     gen.slowPathJumpList().link(&m_jit);
3066     silentSpillAllRegisters(resultRegs);
3067
3068     if (leftOperand.isConst()) {
3069         leftRegs = resultRegs;
3070         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3071     } else if (rightOperand.isConst()) {
3072         rightRegs = resultRegs;
3073         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3074     }
3075
3076     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3077
3078     silentFillAllRegisters(resultRegs);
3079     m_jit.exceptionCheck();
3080
3081     gen.endJumpList().link(&m_jit);
3082     jsValueResult(resultRegs, node);
3083     return;
3084 }
3085
3086 void SpeculativeJIT::compileShiftOp(Node* node)
3087 {
3088     NodeType op = node->op();
3089     Edge& leftChild = node->child1();
3090     Edge& rightChild = node->child2();
3091
3092     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3093         switch (op) {
3094         case BitLShift:
3095             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3096             return;
3097         case BitRShift:
3098         case BitURShift:
3099             emitUntypedRightShiftBitOp(node);
3100             return;
3101         default:
3102             RELEASE_ASSERT_NOT_REACHED();
3103         }
3104     }
3105
3106     if (rightChild->isInt32Constant()) {
3107         SpeculateInt32Operand op1(this, leftChild);
3108         GPRTemporary result(this, Reuse, op1);
3109
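             // Per JS semantics only the low five bits of the shift count are used, hence the
             // mask with 0x1f.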
3110         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3111
3112         int32Result(result.gpr(), node);
3113     } else {
3114     // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3115         SpeculateInt32Operand op1(this, leftChild);
3116         SpeculateInt32Operand op2(this, rightChild);
3117         GPRTemporary result(this, Reuse, op1);
3118
3119         GPRReg reg1 = op1.gpr();
3120         GPRReg reg2 = op2.gpr();
3121         shiftOp(op, reg1, reg2, result.gpr());
3122
3123         int32Result(result.gpr(), node);
3124     }
3125 }
3126
3127 void SpeculativeJIT::compileValueAdd(Node* node)
3128 {
3129     Edge& leftChild = node->child1();
3130     Edge& rightChild = node->child2();
3131
3132     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3133         JSValueOperand left(this, leftChild);
3134         JSValueOperand right(this, rightChild);
3135         JSValueRegs leftRegs = left.jsValueRegs();
3136         JSValueRegs rightRegs = right.jsValueRegs();
3137 #if USE(JSVALUE64)
3138         GPRTemporary result(this);
3139         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3140 #else
3141         GPRTemporary resultTag(this);
3142         GPRTemporary resultPayload(this);
3143         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3144 #endif
3145         flushRegisters();
3146         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3147         m_jit.exceptionCheck();
3148     
3149         jsValueResult(resultRegs, node);
3150         return;
3151     }
3152
3153     Optional<JSValueOperand> left;
3154     Optional<JSValueOperand> right;
3155
3156     JSValueRegs leftRegs;
3157     JSValueRegs rightRegs;
3158
3159     FPRTemporary leftNumber(this);
3160     FPRTemporary rightNumber(this);
3161     FPRReg leftFPR = leftNumber.fpr();
3162     FPRReg rightFPR = rightNumber.fpr();
3163
3164 #if USE(JSVALUE64)
3165     GPRTemporary result(this);
3166     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3167     GPRTemporary scratch(this);
3168     GPRReg scratchGPR = scratch.gpr();
3169     FPRReg scratchFPR = InvalidFPRReg;
3170 #else
3171     GPRTemporary resultTag(this);
3172     GPRTemporary resultPayload(this);
3173     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3174     GPRReg scratchGPR = resultTag.gpr();
3175     FPRTemporary fprScratch(this);
3176     FPRReg scratchFPR = fprScratch.fpr();
3177 #endif
3178
3179     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3180     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3181
3182     // The snippet generator does not support both operands being constant. If the left
3183     // operand is already const, we'll ignore the right operand's constness.
3184     if (leftChild->isInt32Constant())
3185         leftOperand.setConstInt32(leftChild->asInt32());
3186     else if (rightChild->isInt32Constant())
3187         rightOperand.setConstInt32(rightChild->asInt32());
3188
3189     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3190
3191     if (!leftOperand.isConst()) {
3192         left = JSValueOperand(this, leftChild);
3193         leftRegs = left->jsValueRegs();
3194     }
3195     if (!rightOperand.isConst()) {
3196         right = JSValueOperand(this, rightChild);
3197         rightRegs = right->jsValueRegs();
3198     }
3199
3200     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3201         leftFPR, rightFPR, scratchGPR, scratchFPR);
3202     gen.generateFastPath(m_jit);
3203
3204     ASSERT(gen.didEmitFastPath());
3205     gen.endJumpList().append(m_jit.jump());
3206
3207     gen.slowPathJumpList().link(&m_jit);
3208
3209     silentSpillAllRegisters(resultRegs);
3210
3211     if (leftOperand.isConst()) {
3212         leftRegs = resultRegs;
3213         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3214     } else if (rightOperand.isConst()) {
3215         rightRegs = resultRegs;
3216         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3217     }
3218
3219     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3220
3221     silentFillAllRegisters(resultRegs);
3222     m_jit.exceptionCheck();
3223
3224     gen.endJumpList().link(&m_jit);
3225     jsValueResult(resultRegs, node);
3226     return;
3227 }
3228
3229 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3230 {
3231     // We could do something smarter here, but this case is currently super rare and,
3232     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
3233
3234     JSValueOperand value(this, node->child1());
3235     SpeculateCellOperand constructor(this, node->child2());
3236     JSValueOperand hasInstanceValue(this, node->child3());
3237     GPRTemporary result(this);
3238
3239     JSValueRegs valueRegs = value.jsValueRegs();
3240     GPRReg constructorGPR = constructor.gpr();
3241     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3242     GPRReg resultGPR = result.gpr();
3243
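         // There is no fast path here: the unconditional jump below routes every execution to
         // the operationInstanceOfCustom slow-path call.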
3244     MacroAssembler::Jump slowCase = m_jit.jump();
3245
3246     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3247
3248     unblessedBooleanResult(resultGPR, node);
3249 }
3250
3251 void SpeculativeJIT::compileArithAdd(Node* node)
3252 {
3253     switch (node->binaryUseKind()) {
3254     case Int32Use: {
3255         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3256
3257         if (node->child2()->isInt32Constant()) {
3258             SpeculateInt32Operand op1(this, node->child1());
3259             int32_t imm2 = node->child2()->asInt32();
3260
3261             if (!shouldCheckOverflow(node->arithMode())) {
3262                 GPRTemporary result(this, Reuse, op1);
3263                 m_jit.add32(Imm32(imm2), op1.gpr(), result.gpr());
3264                 int32Result(result.gpr(), node);
3265                 return;
3266             }
3267
3268             GPRTemporary result(this);
3269             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3270
3271             int32Result(result.gpr(), node);
3272             return;
3273         }
3274                 
3275         SpeculateInt32Operand op1(this, node->child1());
3276         SpeculateInt32Operand op2(this, node->child2());
3277         GPRTemporary result(this, Reuse, op1, op2);
3278
3279         GPRReg gpr1 = op1.gpr();
3280         GPRReg gpr2 = op2.gpr();
3281         GPRReg gprResult = result.gpr();
3282
3283         if (!shouldCheckOverflow(node->arithMode()))
3284             m_jit.add32(gpr1, gpr2, gprResult);
3285         else {
3286             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3287                 
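                 // If the result register aliases one of the inputs, the add has already
                 // clobbered that input; record a SpeculationRecovery so that, on OSR exit,
                 // the other operand can be subtracted back out to recover the original value.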
3288             if (gpr1 == gprResult)
3289                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3290             else if (gpr2 == gprResult)
3291                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3292             else
3293                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3294         }
3295
3296         int32Result(gprResult, node);
3297         return;
3298     }
3299         
3300 #if USE(JSVALUE64)
3301     case Int52RepUse: {
3302         ASSERT(shouldCheckOverflow(node->arithMode()));
3303         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3304
3305         // Will we need an overflow check? If we can prove that neither input can be
3306         // Int52 then the overflow check will not be necessary.
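             // (Two values that each fit in 32 bits sum to a magnitude of at most 2^32, which
             // is comfortably within int52's +/-2^51 range.)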
3307         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3308             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3309             SpeculateWhicheverInt52Operand op1(this, node->child1());
3310             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3311             GPRTemporary result(this, Reuse, op1);
3312             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3313             int52Result(result.gpr(), node, op1.format());
3314             return;
3315         }
3316         
3317         SpeculateInt52Operand op1(this, node->child1());
3318         SpeculateInt52Operand op2(this, node->child2());
3319         GPRTemporary result(this);
3320         m_jit.move(op1.gpr(), result.gpr());
3321         speculationCheck(
3322             Int52Overflow, JSValueRegs(), 0,
3323             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3324         int52Result(result.gpr(), node);
3325         return;
3326     }
3327 #endif // USE(JSVALUE64)
3328     
3329     case DoubleRepUse: {
3330         SpeculateDoubleOperand op1(this, node->child1());
3331         SpeculateDoubleOperand op2(this, node->child2());
3332         FPRTemporary result(this, op1, op2);
3333
3334         FPRReg reg1 = op1.fpr();
3335         FPRReg reg2 = op2.fpr();
3336         m_jit.addDouble(reg1, reg2, result.fpr());
3337
3338         doubleResult(result.fpr(), node);
3339         return;
3340     }
3341         
3342     default:
3343         RELEASE_ASSERT_NOT_REACHED();
3344         break;
3345     }
3346 }
3347
3348 void SpeculativeJIT::compileMakeRope(Node* node)
3349 {
3350     ASSERT(node->child1().useKind() == KnownStringUse);
3351     ASSERT(node->child2().useKind() == KnownStringUse);
3352     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3353     
3354     SpeculateCellOperand op1(this, node->child1());
3355     SpeculateCellOperand op2(this, node->child2());
3356     SpeculateCellOperand op3(this, node->child3());
3357     GPRTemporary result(this);
3358     GPRTemporary allocator(this);
3359     GPRTemporary scratch(this);
3360     
3361     GPRReg opGPRs[3];
3362     unsigned numOpGPRs;
3363     opGPRs[0] = op1.gpr();
3364     opGPRs[1] = op2.gpr();
3365     if (node->child3()) {
3366         opGPRs[2] = op3.gpr();
3367         numOpGPRs = 3;
3368     } else {
3369         opGPRs[2] = InvalidGPRReg;
3370         numOpGPRs = 2;
3371     }
3372     GPRReg resultGPR = result.gpr();
3373     GPRReg allocatorGPR = allocator.gpr();
3374     GPRReg scratchGPR = scratch.gpr();
3375     
3376     JITCompiler::JumpList slowPath;
3377     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3378     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3379     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3380         
3381     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3382     for (unsigned i = 0; i < numOpGPRs; ++i)
3383         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3384     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3385         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
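         // From here on, scratchGPR accumulates the AND of the fibers' flags (to compute the
         // rope's Is8Bit bit) and allocatorGPR accumulates the total length, speculating if the
         // length addition overflows.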
3386     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3387     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3388     if (!ASSERT_DISABLED) {
3389         JITCompiler::Jump ok = m_jit.branch32(
3390             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3391         m_jit.abortWithReason(DFGNegativeStringLength);
3392         ok.link(&m_jit);
3393     }
3394     for (unsigned i = 1; i < numOpGPRs; ++i) {
3395         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3396         speculationCheck(
3397             Uncountable, JSValueSource(), nullptr,
3398             m_jit.branchAdd32(
3399                 JITCompiler::Overflow,
3400                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3401     }
3402     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3403     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3404     if (!ASSERT_DISABLED) {
3405         JITCompiler::Jump ok = m_jit.branch32(
3406             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3407         m_jit.abortWithReason(DFGNegativeStringLength);
3408         ok.link(&m_jit);
3409     }
3410     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3411     
3412     switch (numOpGPRs) {
3413     case 2:
3414         addSlowPathGenerator(slowPathCall(
3415             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3416         break;
3417     case 3:
3418         addSlowPathGenerator(slowPathCall(
3419             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3420         break;
3421     default:
3422         RELEASE_ASSERT_NOT_REACHED();
3423         break;
3424     }
3425         
3426     cellResult(resultGPR, node);
3427 }
3428
3429 void SpeculativeJIT::compileArithClz32(Node* node)
3430 {
3431     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3432     SpeculateInt32Operand value(this, node->child1());
3433     GPRTemporary result(this, Reuse, value);
3434     GPRReg valueReg = value.gpr();
3435     GPRReg resultReg = result.gpr();
3436     m_jit.countLeadingZeros32(valueReg, resultReg);
3437     int32Result(resultReg, node);
3438 }
3439
3440 void SpeculativeJIT::compileArithSub(Node* node)
3441 {
3442     switch (node->binaryUseKind()) {
3443     case Int32Use: {
3444         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3445         
3446         if (node->child2()->isInt32Constant()) {
3447             SpeculateInt32Operand op1(this, node->child1());
3448             int32_t imm2 = node->child2()->asInt32();
3449             GPRTemporary result(this);
3450
3451             if (!shouldCheckOverflow(node->arithMode())) {
3452                 m_jit.move(op1.gpr(), result.gpr());
3453                 m_jit.sub32(Imm32(imm2), result.gpr());
3454             } else {
3455                 GPRTemporary scratch(this);
3456                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3457             }
3458
3459             int32Result(result.gpr(), node);
3460             return;
3461         }
3462             
3463         if (node->child1()->isInt32Constant()) {
3464             int32_t imm1 = node->child1()->asInt32();
3465             SpeculateInt32Operand op2(this, node->child2());
3466             GPRTemporary result(this);
3467                 
3468             m_jit.move(Imm32(imm1), result.gpr());
3469             if (!shouldCheckOverflow(node->arithMode()))
3470                 m_jit.sub32(op2.gpr(), result.gpr());
3471             else
3472                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3473                 
3474             int32Result(result.gpr(), node);
3475             return;
3476         }
3477             
3478         SpeculateInt32Operand op1(this, node->child1());
3479         SpeculateInt32Operand op2(this, node->child2());
3480         GPRTemporary result(this);
3481
3482         if (!shouldCheckOverflow(node->arithMode())) {
3483             m_jit.move(op1.gpr(), result.gpr());
3484             m_jit.sub32(op2.gpr(), result.gpr());
3485         } else
3486             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3487
3488         int32Result(result.gpr(), node);
3489         return;
3490     }
3491         
3492 #if USE(JSVALUE64)
3493     case Int52RepUse: {
3494         ASSERT(shouldCheckOverflow(node->arithMode()));
3495         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3496
3497         // Will we need an overflow check? If we can prove that neither input can be
3498         // Int52 then the overflow check will not be necessary.
3499         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3500             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3501             SpeculateWhicheverInt52Operand op1(this, node->child1());
3502             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3503             GPRTemporary result(this, Reuse, op1);
3504             m_jit.move(op1.gpr(), result.gpr());
3505             m_jit.sub64(op2.gpr(), result.gpr());
3506             int52Result(result.gpr(), node, op1.format());
3507             return;
3508         }
3509         
3510         SpeculateInt52Operand op1(this, node->child1());
3511         SpeculateInt52Operand op2(this, node->child2());
3512         GPRTemporary result(this);
3513         m_jit.move(op1.gpr(), result.gpr());
3514         speculationCheck(
3515             Int52Overflow, JSValueRegs(), 0,
3516             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3517         int52Result(result.gpr(), node);
3518         return;
3519     }
3520 #endif // USE(JSVALUE64)
3521
3522     case DoubleRepUse: {
3523         SpeculateDoubleOperand op1(this, node->child1());
3524         SpeculateDoubleOperand op2(this, node->child2());
3525         FPRTemporary result(this, op1);
3526
3527         FPRReg reg1 = op1.fpr();
3528         FPRReg reg2 = op2.fpr();
3529         m_jit.subDouble(reg1, reg2, result.fpr());
3530
3531         doubleResult(result.fpr(), node);
3532         return;
3533     }
3534
3535     case UntypedUse: {
3536         Edge& leftChild = node->child1();
3537         Edge& rightChild = node->child2();
3538
3539         JSValueOperand left(this, leftChild);
3540         JSValueOperand right(this, rightChild);
3541
3542         JSValueRegs leftRegs = left.jsValueRegs();
3543         JSValueRegs rightRegs = right.jsValueRegs();
3544
3545         FPRTemporary leftNumber(this);
3546         FPRTemporary rightNumber(this);
3547         FPRReg leftFPR = leftNumber.fpr();
3548         FPRReg rightFPR = rightNumber.fpr();
3549
3550 #if USE(JSVALUE64)
3551         GPRTemporary result(this);
3552         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3553         GPRTemporary scratch(this);
3554         GPRReg scratchGPR = scratch.gpr();
3555         FPRReg scratchFPR = InvalidFPRReg;
3556 #else
3557         GPRTemporary resultTag(this);
3558         GPRTemporary resultPayload(this);
3559         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3560         GPRReg scratchGPR = resultTag.gpr();
3561         FPRTemporary fprScratch(this);
3562         FPRReg scratchFPR = fprScratch.fpr();
3563 #endif
3564
3565         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3566         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3567
3568         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3569             leftFPR, rightFPR, scratchGPR, scratchFPR);
3570         gen.generateFastPath(m_jit);
3571
3572         ASSERT(gen.didEmitFastPath());
3573         gen.endJumpList().append(m_jit.jump());
3574
3575         gen.slowPathJumpList().link(&m_jit);
3576         silentSpillAllRegisters(resultRegs);
3577         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3578         silentFillAllRegisters(resultRegs);
3579         m_jit.exceptionCheck();
3580
3581         gen.endJumpList().link(&m_jit);
3582         jsValueResult(resultRegs, node);
3583         return;
3584     }
3585
3586     default:
3587         RELEASE_ASSERT_NOT_REACHED();
3588         return;
3589     }
3590 }
3591
3592 void SpeculativeJIT::compileArithNegate(Node* node)
3593 {
3594     switch (node->child1().useKind()) {
3595     case Int32Use: {
3596         SpeculateInt32Operand op1(this, node->child1());
3597         GPRTemporary result(this);
3598
3599         m_jit.move(op1.gpr(), result.gpr());
3600
3601         // Note: there is no arith mode where overflow goes unchecked but someone still
3602         // cares about negative zero, so the plain neg32 case below can ignore negative zero.
3603         
3604         if (!shouldCheckOverflow(node->arithMode()))
3605             m_jit.neg32(result.gpr());
3606         else if (!shouldCheckNegativeZero(node->arithMode()))
3607             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3608         else {
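                 // result & 0x7fffffff is zero exactly when the value is 0 or INT32_MIN, i.e.
                 // exactly when negation would produce negative zero or overflow, so a single
                 // test covers both checks.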
3609             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3610             m_jit.neg32(result.gpr());
3611         }
3612
3613         int32Result(result.gpr(), node);
3614         return;
3615     }
3616
3617 #if USE(JSVALUE64)
3618     case Int52RepUse: {
3619         ASSERT(shouldCheckOverflow(node->arithMode()));
3620         
3621         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3622             SpeculateWhicheverInt52Operand op1(this, node->child1());
3623             GPRTemporary result(this);
3624             GPRReg op1GPR = op1.gpr();
3625             GPRReg resultGPR = result.gpr();
3626             m_jit.move(op1GPR, resultGPR);
3627             m_jit.neg64(resultGPR);
3628             if (shouldCheckNegativeZero(node->arithMode())) {
3629                 speculationCheck(
3630                     NegativeZero, JSValueRegs(), 0,
3631                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3632             }
3633             int52Result(resultGPR, node, op1.format());
3634             return;
3635         }
3636         
3637         SpeculateInt52Operand op1(this, node->child1());
3638         GPRTemporary result(this);
3639         GPRReg op1GPR = op1.gpr();
3640         GPRReg resultGPR = result.gpr();
3641         m_jit.move(op1GPR, resultGPR);
3642         speculationCheck(
3643             Int52Overflow, JSValueRegs(), 0,
3644             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3645         if (shouldCheckNegativeZero(node->arithMode())) {
3646             speculationCheck(
3647                 NegativeZero, JSValueRegs(), 0,
3648                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3649         }
3650         int52Result(resultGPR, node);
3651         return;
3652     }
3653 #endif // USE(JSVALUE64)
3654         
3655     case DoubleRepUse: {
3656         SpeculateDoubleOperand op1(this, node->child1());
3657         FPRTemporary result(this);
3658         
3659         m_jit.negateDouble(op1.fpr(), result.fpr());
3660         
3661         doubleResult(result.fpr(), node);
3662         return;
3663     }
3664         
3665     default:
3666         RELEASE_ASSERT_NOT_REACHED();
3667         return;
3668     }
3669 }
3670 void SpeculativeJIT::compileArithMul(Node* node)
3671 {
3672     switch (node->binaryUseKind()) {
3673     case Int32Use: {
3674         if (node->child2()->isInt32Constant()) {
3675             SpeculateInt32Operand op1(this, node->child1());
3676             GPRTemporary result(this);
3677
3678             int32_t imm = node->child2()->asInt32();
3679             GPRReg op1GPR = op1.gpr();
3680             GPRReg resultGPR = result.gpr();
3681
3682             if (!shouldCheckOverflow(node->arithMode()))
3683                 m_jit.mul32(Imm32(imm), op1GPR, resultGPR);
3684             else {
3685                 speculationCheck(Overflow, JSValueRegs(), 0,
3686                     m_jit.branchMul32(MacroAssembler::Overflow, op1GPR, Imm32(imm), resultGPR));
3687             }
3688
3689             // With a constant operand, negative zero can only be produced in two ways:
3690             //   a negative op1 multiplied by the constant 0, or
3691             //   a zero op1 multiplied by a negative constant.
3692             if (shouldCheckNegativeZero(node->arithMode())) {
3693                 if (!imm)
3694                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, op1GPR));
3695                 else if (imm < 0) {
3696                     if (shouldCheckOverflow(node->arithMode()))
3697                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
3698                     else
3699                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, op1GPR));
3700                 }
3701             }
3702
3703             int32Result(resultGPR, node);
3704             return;
3705         }
3706         SpeculateInt32Operand op1(this, node->child1());
3707         SpeculateInt32Operand op2(this, node->child2());
3708         GPRTemporary result(this);
3709
3710         GPRReg reg1 = op1.gpr();
3711         GPRReg reg2 = op2.gpr();
3712
3713         // We can perform truncated multiplications if we get to this point, because if the
3714         // fixup phase could not prove that it would be safe, it would have turned us into
3715         // a double multiplication.
3716         if (!shouldCheckOverflow(node->arithMode())) {
3717             m_jit.move(reg1, result.gpr());
3718             m_jit.mul32(reg2, result.gpr());
3719         } else {
3720             speculationCheck(
3721                 Overflow, JSValueRegs(), 0,
3722                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3723         }
3724             
3725         // Check for negative zero, if the users of this node care about such things.
3726         if (shouldCheckNegativeZero(node->arithMode())) {
3727             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3728             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg1));
3729             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg2));
3730             resultNonZero.link(&m_jit);
3731         }
3732
3733         int32Result(result.gpr(), node);
3734         return;
3735     }
3736
3737 #if USE(JSVALUE64)
3738     case Int52RepUse: {
3739         ASSERT(shouldCheckOverflow(node->arithMode()));
3740         
3741         // This is super clever. We want to do an int52 multiplication and check the
3742         // int52 overflow bit. There is no direct hardware support for this, but we do
3743         // have the ability to do an int64 multiplication and check the int64 overflow
3744         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3745         // registers, with the high 12 bits being sign-extended. We can do:
3746         //
3747         //     (a * (b << 12))
3748         //
3749         // This will give us a left-shifted int52 (value is in the high 52 bits, low 12
3750         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3751         // multiplication overflows is identical to whether the 'a * b' 52-bit
3752         // multiplication overflows.
3753         //
3754         // In our nomenclature, this is:
3755         //
3756         //     strictInt52(a) * int52(b) => int52
3757         //
3758         // That is, "strictInt52" means unshifted and "int52" means left-shifted by 12
3759         // bits.
3760         //
3761         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3762         // we just do whatever is more convenient for op1 and have op2 do the
3763         // opposite. This ensures that we do at most one shift.
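             // A quick, informal sanity check of the shift math: with a and b as unshifted
             // 52-bit values, a * (b << 12) == (a * b) << 12, and a value shifted left by 12
             // fits in 64 bits exactly when the unshifted value fits in 52 bits, so the CPU's
             // int64 overflow flag doubles as the int52 overflow check.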
3764
3765         SpeculateWhicheverInt52Operand op1(this, node->child1());
3766         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3767         GPRTemporary result(this);
3768         
3769         GPRReg op1GPR = op1.gpr();
3770         GPRReg op2GPR = op2.gpr();
3771         GPRReg resultGPR = result.gpr();
3772         
3773         m_jit.move(op1GPR, resultGPR);
3774         speculationCheck(
3775             Int52Overflow, JSValueRegs(), 0,
3776             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3777         
3778         if (shouldCheckNegativeZero(node->arithMode())) {
3779             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3780                 MacroAssembler::NonZero, resultGPR);
3781             speculationCheck(
3782                 NegativeZero, JSValueRegs(), 0,
3783                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3784             speculationCheck(
3785                 NegativeZero, JSValueRegs(), 0,
3786                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3787             resultNonZero.link(&m_jit);
3788         }
3789         
3790         int52Result(resultGPR, node);
3791         return;
3792     }
3793 #endif // USE(JSVALUE64)
3794         
3795     case DoubleRepUse: {
3796         SpeculateDoubleOperand op1(this, node->child1());
3797         SpeculateDoubleOperand op2(this, node->child2());
3798         FPRTemporary result(this, op1, op2);
3799         
3800         FPRReg reg1 = op1.fpr();
3801         FPRReg reg2 = op2.fpr();
3802         
3803         m_jit.mulDouble(reg1, reg2, result.fpr());
3804         
3805         doubleResult(result.fpr(), node);
3806         return;
3807     }
3808
3809     case UntypedUse: {
3810         Edge& leftChild = node->child1();
3811         Edge& rightChild = node->child2();
3812
3813         if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3814             JSValueOperand left(this, leftChild);
3815             JSValueOperand right(this, rightChild);
3816             JSValueRegs leftRegs = left.jsValueRegs();
3817             JSValueRegs rightRegs = right.jsValueRegs();
3818 #if USE(JSVALUE64)
3819             GPRTemporary result(this);
3820             JSValueRegs resultRegs = JSValueRegs(result.gpr());
3821 #else
3822             GPRTemporary resultTag(this);
3823             GPRTemporary resultPayload(this);
3824             JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3825 #endif
3826             flushRegisters();
3827             callOperation(operationValueMul, resultRegs, leftRegs, rightRegs);
3828             m_jit.exceptionCheck();
3829
3830             jsValueResult(resultRegs, node);
3831             return;
3832         }
3833
3834         Optional<JSValueOperand> left;
3835         Optional<JSValueOperand> right;
3836
3837         JSValueRegs leftRegs;
3838         JSValueRegs rightRegs;
3839
3840         FPRTemporary leftNumber(this);
3841         FPRTemporary rightNumber(this);
3842         FPRReg leftFPR = leftNumber.fpr();
3843         FPRReg rightFPR = rightNumber.fpr();
3844
3845 #if USE(JSVALUE64)
3846         GPRTemporary result(this);
3847         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3848         GPRTemporary scratch(this);
3849         GPRReg scratchGPR = scratch.gpr();
3850         FPRReg scratchFPR = InvalidFPRReg;
3851 #else
3852         GPRTemporary resultTag(this);
3853         GPRTemporary resultPayload(this);
3854         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3855         GPRReg scratchGPR = resultTag.gpr();
3856         FPRTemporary fprScratch(this);
3857         FPRReg scratchFPR = fprScratch.fpr();
3858 #endif
3859
3860         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3861         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3862
3863         // The snippet generator does not support both operands being constant. If the left
3864         // operand is already const, we'll ignore the right operand's constness.
3865         if (leftChild->isInt32Constant())
3866             leftOperand.setConstInt32(leftChild->asInt32());
3867         else if (rightChild->isInt32Constant())
3868             rightOperand.setConstInt32(rightChild->asInt32());
3869
3870         ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3871