Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

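// Inline-allocates a JSArray with the given structure and element count: the butterfly
// storage is allocated first, then the JSArray cell, and any allocation failure falls
// through to a slow path that calls operationNewArrayWithSize. Unused double slots are
// pre-filled with PNaN.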
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

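// When OSR exit fuzzing is enabled, bumps the global fuzz-check counter and returns a
// jump that fires once the configured threshold ("at" / "atOrAfter") is reached; callers
// fold that jump into their speculation checks. Returns an unset Jump when fuzzing is off.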
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

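// The speculationCheck() family registers an OSR exit for the current node: the given
// jump(s), plus the fuzz-check jump if any, are recorded together with the exit kind,
// the value source, the value-profile accessor, and the current event-stream position,
// so the bytecode state can be rebuilt if the speculation fails at run time.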
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::stubUnavailableRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic);
        m_slowPathGenerators[i]->generate(this);
    }
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

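// Silent spill/fill plans describe how to temporarily save a live register across a call
// and restore it afterwards without disturbing the register-allocation state. The plan
// chosen depends on the value's current DataFormat, its spill format, and whether the
// node is a constant that can simply be rematerialized.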
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

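// Expects tempGPR to hold the base cell's indexing-type byte; returns a jump that is taken
// when the indexing shape (and array-ness, depending on the ArrayMode's array class) does
// not match what the ArrayMode requires.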
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

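// Emits the indexing-type or cell-type checks demanded by the node's ArrayMode (which must
// be specific and must not require conversion), speculating (e.g. with BadIndexingType)
// when the base object does not match.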
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

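// Arrayify converts the base object's storage to the indexing type required by the node's
// ArrayMode. The fast path just checks the structure (for ArrayifyToStructure) or the
// indexing type; anything else is handled by an ArrayifySlowPathGenerator.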
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

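// Fills a storage (butterfly) pointer into a GPR, reloading it from the spill slot when
// necessary; edges that are not already in storage format are filled as cells.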
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

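// Compiles the 'in' operator. When the property name is a constant atomic string, an
// inline cache is emitted (patchable jump plus operationInOptimize slow path); otherwise
// the operands are flushed and operationGenericIn is called.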
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

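// If the compare feeds directly into the next node's Branch, fuse the two (peephole) and
// return true so the caller skips the branch; otherwise emit a standalone compare and
// return false.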
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

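// The compilePeepHole*Branch() helpers emit a fused compare-and-branch for the various use
// kinds: each one branches to the taken block and then jumps to (or falls through into)
// the notTaken block.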
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isBooleanConstant()) {
        bool imm = node->child1()->asBoolean();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (node->child2()->isBooleanConstant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = node->child2()->asBoolean();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes that also reference the compare.
1354         ASSERT(node->adjustedRefCount() == 1);
1355
1356         if (node->isBinaryUseKind(Int32Use))
1357             compilePeepHoleInt32Branch(node, branchNode, condition);
1358 #if USE(JSVALUE64)
1359         else if (node->isBinaryUseKind(Int52RepUse))
1360             compilePeepHoleInt52Branch(node, branchNode, condition);
1361 #endif // USE(JSVALUE64)
1362         else if (node->isBinaryUseKind(DoubleRepUse))
1363             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1364         else if (node->op() == CompareEq) {
1365             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1366                 // Use non-peephole comparison, for now.
1367                 return false;
1368             }
1369             if (node->isBinaryUseKind(BooleanUse))
1370                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1371             else if (node->isBinaryUseKind(SymbolUse))
1372                 compilePeepHoleSymbolEquality(node, branchNode);
1373             else if (node->isBinaryUseKind(ObjectUse))
1374                 compilePeepHoleObjectEquality(node, branchNode);
1375             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1376                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1377             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1378                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1379             else if (!needsTypeCheck(node->child1(), SpecOther))
1380                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1381             else if (!needsTypeCheck(node->child2(), SpecOther))
1382                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1383             else {
1384                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1385                 return true;
1386             }
1387         } else {
1388             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1389             return true;
1390         }
1391
1392         use(node->child1());
1393         use(node->child2());
1394         m_indexInBlock = branchIndexInBlock;
1395         m_currentNode = branchNode;
1396         return true;
1397     }
1398     return false;
1399 }
1400
1401 void SpeculativeJIT::noticeOSRBirth(Node* node)
1402 {
1403     if (!node->hasVirtualRegister())
1404         return;
1405     
1406     VirtualRegister virtualRegister = node->virtualRegister();
1407     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1408     
1409     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1410 }
1411
1412 void SpeculativeJIT::compileMovHint(Node* node)
1413 {
1414     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1415     
1416     Node* child = node->child1().node();
1417     noticeOSRBirth(child);
1418     
1419     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1420 }
1421
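// Gives up on code generation: plants an abort with the given reason at the last
// generated node and clears all per-node generation info.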
1422 void SpeculativeJIT::bail(AbortReason reason)
1423 {
1424     if (verboseCompilationEnabled())
1425         dataLog("Bailing compilation.\n");
1426     m_compileOkay = true;
1427     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1428     clearGenerationInfo();
1429 }
1430
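// Generates code for m_block: records the block head label, skips blocks the CFA proved
// unreachable, replays the variables live at the block head into the event stream, then
// compiles each node in order while stepping the abstract interpreter alongside.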
1431 void SpeculativeJIT::compileCurrentBlock()
1432 {
1433     ASSERT(m_compileOkay);
1434     
1435     if (!m_block)
1436         return;
1437     
1438     ASSERT(m_block->isReachable);
1439     
1440     m_jit.blockHeads()[m_block->index] = m_jit.label();
1441
1442     if (!m_block->intersectionOfCFAHasVisited) {
1443         // Don't generate code for basic blocks that are unreachable according to CFA.
1444         // But to be sure that nobody has generated a jump to this block, drop in a
1445         // breakpoint here.
1446         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1447         return;
1448     }
1449
1450     m_stream->appendAndLog(VariableEvent::reset());
1451     
1452     m_jit.jitAssertHasValidCallFrame();
1453     m_jit.jitAssertTagsInPlace();
1454     m_jit.jitAssertArgumentCountSane();
1455
1456     m_state.reset();
1457     m_state.beginBasicBlock(m_block);
1458     
1459     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1460         int operand = m_block->variablesAtHead.operandForIndex(i);
1461         Node* node = m_block->variablesAtHead[i];
1462         if (!node)
1463             continue; // No need to record dead SetLocal's.
1464         
1465         VariableAccessData* variable = node->variableAccessData();
1466         DataFormat format;
1467         if (!node->refCount())
1468             continue; // No need to record dead SetLocal's.
1469         format = dataFormatFor(variable->flushFormat());
1470         m_stream->appendAndLog(
1471             VariableEvent::setLocal(
1472                 VirtualRegister(operand),
1473                 variable->machineLocal(),
1474                 format));
1475     }
1476
1477     m_origin = NodeOrigin();
1478     
1479     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1480         m_currentNode = m_block->at(m_indexInBlock);
1481         
1482         // We may have hit a contradiction that the CFA was aware of but that the JIT
1483         // didn't cause directly.
1484         if (!m_state.isValid()) {
1485             bail(DFGBailedAtTopOfBlock);
1486             return;
1487         }
1488
1489         m_interpreter.startExecuting();
1490         m_jit.setForNode(m_currentNode);
1491         m_origin = m_currentNode->origin;
1492         if (validationEnabled())
1493             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1494         m_lastGeneratedNode = m_currentNode->op();
1495         
1496         ASSERT(m_currentNode->shouldGenerate());
1497         
1498         if (verboseCompilationEnabled()) {
1499             dataLogF(
1500                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1501                 (int)m_currentNode->index(),
1502                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1503             dataLog("\n");
1504         }
1505
1506         if (Options::validateDFGExceptionHandling() && mayExit(m_jit.graph(), m_currentNode) != DoesNotExit)
1507             m_jit.jitReleaseAssertNoException();
1508
1509         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1510
1511         compile(m_currentNode);
1512         
1513         if (belongsInMinifiedGraph(m_currentNode->op()))
1514             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1515         
1516 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1517         m_jit.clearRegisterAllocationOffsets();
1518 #endif
1519         
1520         if (!m_compileOkay) {
1521             bail(DFGBailedAtEndOfNode);
1522             return;
1523         }
1524         
1525         // Make sure that the abstract state is rematerialized for the next node.
1526         m_interpreter.executeEffects(m_indexInBlock);
1527     }
1528     
1529     // Perform the most basic verification that children have been used correctly.
1530     if (!ASSERT_DISABLED) {
1531         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1532             GenerationInfo& info = m_generationInfo[index];
1533             RELEASE_ASSERT(!info.alive());
1534         }
1535     }
1536 }
1537
1538 // If we are making type predictions about our arguments then
1539 // we need to check that they are correct on function entry.
1540 void SpeculativeJIT::checkArgumentTypes()
1541 {
1542     ASSERT(!m_currentNode);
1543     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1544
1545     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1546         Node* node = m_jit.graph().m_arguments[i];
1547         if (!node) {
1548             // The argument is dead. We don't do any checks for such arguments.
1549             continue;
1550         }
1551         
1552         ASSERT(node->op() == SetArgument);
1553         ASSERT(node->shouldGenerate());
1554
1555         VariableAccessData* variableAccessData = node->variableAccessData();
1556         FlushFormat format = variableAccessData->flushFormat();
1557         
1558         if (format == FlushedJSValue)
1559             continue;
1560         
1561         VirtualRegister virtualRegister = variableAccessData->local();
1562
1563         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1564         
1565 #if USE(JSVALUE64)
1566         switch (format) {
1567         case FlushedInt32: {
1568             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1569             break;
1570         }
1571         case FlushedBoolean: {
1572             GPRTemporary temp(this);
1573             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1574             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1575             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1576             break;
1577         }
1578         case FlushedCell: {
1579             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1580             break;
1581         }
1582         default:
1583             RELEASE_ASSERT_NOT_REACHED();
1584             break;
1585         }
1586 #else
1587         switch (format) {
1588         case FlushedInt32: {
1589             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1590             break;
1591         }
1592         case FlushedBoolean: {
1593             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1594             break;
1595         }
1596         case FlushedCell: {
1597             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1598             break;
1599         }
1600         default:
1601             RELEASE_ASSERT_NOT_REACHED();
1602             break;
1603         }
1604 #endif
1605     }
1606
1607     m_origin = NodeOrigin();
1608 }
1609
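// Top-level code generation driver: checks argument types once, then compiles every
// basic block in index order and links the pending branches.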
1610 bool SpeculativeJIT::compile()
1611 {
1612     checkArgumentTypes();
1613     
1614     ASSERT(!m_currentNode);
1615     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1616         m_jit.setForBlockIndex(blockIndex);
1617         m_block = m_jit.graph().block(blockIndex);
1618         compileCurrentBlock();
1619     }
1620     linkBranches();
1621     return true;
1622 }
1623
1624 void SpeculativeJIT::createOSREntries()
1625 {
1626     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1627         BasicBlock* block = m_jit.graph().block(blockIndex);
1628         if (!block)
1629             continue;
1630         if (!block->isOSRTarget)
1631             continue;
1632         
1633         // Currently we don't have OSR entry trampolines. We could add them
1634         // here if need be.
1635         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1636     }
1637 }
1638
1639 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1640 {
1641     unsigned osrEntryIndex = 0;
1642     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1643         BasicBlock* block = m_jit.graph().block(blockIndex);
1644         if (!block)
1645             continue;
1646         if (!block->isOSRTarget)
1647             continue;
1648         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1649     }
1650     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1651     
1652     if (verboseCompilationEnabled()) {
1653         DumpContext dumpContext;
1654         dataLog("OSR Entries:\n");
1655         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1656             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1657         if (!dumpContext.isEmpty())
1658             dumpContext.dump(WTF::dataFile());
1659     }
1660 }
1661
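// Stores a double into a double-shaped array. The value is type-checked to be a real
// number (NaN is rejected), the index is bounds-checked against the butterfly's public
// length, and out-of-bounds array modes fall back to a slow-path call beyond the vector length.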
1662 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1663 {
1664     Edge child3 = m_jit.graph().varArgChild(node, 2);
1665     Edge child4 = m_jit.graph().varArgChild(node, 3);
1666
1667     ArrayMode arrayMode = node->arrayMode();
1668     
1669     GPRReg baseReg = base.gpr();
1670     GPRReg propertyReg = property.gpr();
1671     
1672     SpeculateDoubleOperand value(this, child3);
1673
1674     FPRReg valueReg = value.fpr();
1675     
1676     DFG_TYPE_CHECK(
1677         JSValueRegs(), child3, SpecFullRealNumber,
1678         m_jit.branchDouble(
1679             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1680     
1681     if (!m_compileOkay)
1682         return;
1683     
1684     StorageOperand storage(this, child4);
1685     GPRReg storageReg = storage.gpr();
1686
1687     if (node->op() == PutByValAlias) {
1688         // Store the value to the array.
1689         GPRReg propertyReg = property.gpr();
1690         FPRReg valueReg = value.fpr();
1691         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1692         
1693         noResult(m_currentNode);
1694         return;
1695     }
1696     
1697     GPRTemporary temporary;
1698     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1699
1700     MacroAssembler::Jump slowCase;
1701     
1702     if (arrayMode.isInBounds()) {
1703         speculationCheck(
1704             OutOfBounds, JSValueRegs(), 0,
1705             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1706     } else {
1707         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1708         
1709         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1710         
1711         if (!arrayMode.isOutOfBounds())
1712             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1713         
1714         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1715         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1716         
1717         inBounds.link(&m_jit);
1718     }
1719     
1720     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1721
1722     base.use();
1723     property.use();
1724     value.use();
1725     storage.use();
1726     
1727     if (arrayMode.isOutOfBounds()) {
1728         addSlowPathGenerator(
1729             slowPathCall(
1730                 slowCase, this,
1731                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1732                 NoResult, baseReg, propertyReg, valueReg));
1733     }
1734
1735     noResult(m_currentNode, UseChildrenCalledExplicitly);
1736 }
1737
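// Fast path for reading a character code out of a JSString: bounds-checks the index
// against the string length, then loads either an 8-bit or a 16-bit character from the
// resolved StringImpl depending on its is8Bit flag.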
1738 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1739 {
1740     SpeculateCellOperand string(this, node->child1());
1741     SpeculateStrictInt32Operand index(this, node->child2());
1742     StorageOperand storage(this, node->child3());
1743
1744     GPRReg stringReg = string.gpr();
1745     GPRReg indexReg = index.gpr();
1746     GPRReg storageReg = storage.gpr();
1747     
1748     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1749
1750     // unsigned comparison so we can filter out negative indices and indices that are too large
1751     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1752
1753     GPRTemporary scratch(this);
1754     GPRReg scratchReg = scratch.gpr();
1755
1756     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1757
1758     // Load the character into scratchReg
1759     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1760
1761     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1762     JITCompiler::Jump cont8Bit = m_jit.jump();
1763
1764     is16Bit.link(&m_jit);
1765
1766     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1767
1768     cont8Bit.link(&m_jit);
1769
1770     int32Result(scratchReg, m_currentNode);
1771 }
1772
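// Indexed read from a string. In-bounds accesses return a single-character string from
// the small-strings cache (with a slow path for characters >= 0x100); out-of-bounds array
// modes additionally install a slow path whose behaviour depends on whether the String
// prototype chain is known to be sane.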
1773 void SpeculativeJIT::compileGetByValOnString(Node* node)
1774 {
1775     SpeculateCellOperand base(this, node->child1());
1776     SpeculateStrictInt32Operand property(this, node->child2());
1777     StorageOperand storage(this, node->child3());
1778     GPRReg baseReg = base.gpr();
1779     GPRReg propertyReg = property.gpr();
1780     GPRReg storageReg = storage.gpr();
1781
1782     GPRTemporary scratch(this);
1783     GPRReg scratchReg = scratch.gpr();
1784 #if USE(JSVALUE32_64)
1785     GPRTemporary resultTag;
1786     GPRReg resultTagReg = InvalidGPRReg;
1787     if (node->arrayMode().isOutOfBounds()) {
1788         GPRTemporary realResultTag(this);
1789         resultTag.adopt(realResultTag);
1790         resultTagReg = resultTag.gpr();
1791     }
1792 #endif
1793
1794     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1795
1796     // unsigned comparison so we can filter out negative indices and indices that are too large
1797     JITCompiler::Jump outOfBounds = m_jit.branch32(
1798         MacroAssembler::AboveOrEqual, propertyReg,
1799         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1800     if (node->arrayMode().isInBounds())
1801         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1802
1803     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1804
1805     // Load the character into scratchReg
1806     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1807
1808     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1809     JITCompiler::Jump cont8Bit = m_jit.jump();
1810
1811     is16Bit.link(&m_jit);
1812
1813     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1814
1815     JITCompiler::Jump bigCharacter =
1816         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1817
1818     // 8 bit string values don't need the isASCII check.
1819     cont8Bit.link(&m_jit);
1820
1821     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1822     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1823     m_jit.loadPtr(scratchReg, scratchReg);
1824
1825     addSlowPathGenerator(
1826         slowPathCall(
1827             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1828
1829     if (node->arrayMode().isOutOfBounds()) {
1830 #if USE(JSVALUE32_64)
1831         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1832 #endif
1833
1834         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1835         if (globalObject->stringPrototypeChainIsSane()) {
1836             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1837             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1838             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1839             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1840             // indexed properties either.
1841             // https://bugs.webkit.org/show_bug.cgi?id=144668
1842             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1843             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1844             
1845 #if USE(JSVALUE64)
1846             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1847                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1848 #else
1849             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1850                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1851                 baseReg, propertyReg));
1852 #endif
1853         } else {
1854 #if USE(JSVALUE64)
1855             addSlowPathGenerator(
1856                 slowPathCall(
1857                     outOfBounds, this, operationGetByValStringInt,
1858                     scratchReg, baseReg, propertyReg));
1859 #else
1860             addSlowPathGenerator(
1861                 slowPathCall(
1862                     outOfBounds, this, operationGetByValStringInt,
1863                     resultTagReg, scratchReg, baseReg, propertyReg));
1864 #endif
1865         }
1866         
1867 #if USE(JSVALUE64)
1868         jsValueResult(scratchReg, m_currentNode);
1869 #else
1870         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1871 #endif
1872     } else
1873         cellResult(scratchReg, m_currentNode);
1874 }
1875
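// String.fromCharCode. Untyped operands go straight to the generic operation; int32
// operands first try the VM's single-character string cache and only fall back to a slow
// call for large character codes or empty cache entries.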
1876 void SpeculativeJIT::compileFromCharCode(Node* node)
1877 {
1878     Edge& child = node->child1();
1879     if (child.useKind() == UntypedUse) {
1880         JSValueOperand opr(this, child);
1881         JSValueRegs oprRegs = opr.jsValueRegs();
1882 #if USE(JSVALUE64)
1883         GPRTemporary result(this);
1884         JSValueRegs resultRegs = JSValueRegs(result.gpr());
1885 #else
1886         GPRTemporary resultTag(this);
1887         GPRTemporary resultPayload(this);
1888         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
1889 #endif
1890         flushRegisters();
1891         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
1892         m_jit.exceptionCheck();
1893         
1894         jsValueResult(resultRegs, node);
1895         return;
1896     }
1897
1898     SpeculateStrictInt32Operand property(this, child);
1899     GPRReg propertyReg = property.gpr();
1900     GPRTemporary smallStrings(this);
1901     GPRTemporary scratch(this);
1902     GPRReg scratchReg = scratch.gpr();
1903     GPRReg smallStringsReg = smallStrings.gpr();
1904
1905     JITCompiler::JumpList slowCases;
1906     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1907     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1908     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1909
1910     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1911     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1912     cellResult(scratchReg, m_currentNode);
1913 }
1914
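// Inspects the operand's current register format to decide how ValueToInt32 should read
// it: directly as an int32, via the boxed JSValue path, or not at all because speculation
// is terminated for boolean/cell formats.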
1915 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1916 {
1917     VirtualRegister virtualRegister = node->virtualRegister();
1918     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1919
1920     switch (info.registerFormat()) {
1921     case DataFormatStorage:
1922         RELEASE_ASSERT_NOT_REACHED();
1923
1924     case DataFormatBoolean:
1925     case DataFormatCell:
1926         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1927         return GeneratedOperandTypeUnknown;
1928
1929     case DataFormatNone:
1930     case DataFormatJSCell:
1931     case DataFormatJS:
1932     case DataFormatJSBoolean:
1933     case DataFormatJSDouble:
1934         return GeneratedOperandJSValue;
1935
1936     case DataFormatJSInt32:
1937     case DataFormatInt32:
1938         return GeneratedOperandInteger;
1939
1940     default:
1941         RELEASE_ASSERT_NOT_REACHED();
1942         return GeneratedOperandTypeUnknown;
1943     }
1944 }
1945
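// Lowers ValueToInt32. Int52 and DoubleRep children are truncated directly; NumberUse and
// NotCellUse children are handled according to how the operand is currently represented,
// calling out to toInt32 for doubles that do not truncate trivially.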
1946 void SpeculativeJIT::compileValueToInt32(Node* node)
1947 {
1948     switch (node->child1().useKind()) {
1949 #if USE(JSVALUE64)
1950     case Int52RepUse: {
1951         SpeculateStrictInt52Operand op1(this, node->child1());
1952         GPRTemporary result(this, Reuse, op1);
1953         GPRReg op1GPR = op1.gpr();
1954         GPRReg resultGPR = result.gpr();
1955         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1956         int32Result(resultGPR, node, DataFormatInt32);
1957         return;
1958     }
1959 #endif // USE(JSVALUE64)
1960         
1961     case DoubleRepUse: {
1962         GPRTemporary result(this);
1963         SpeculateDoubleOperand op1(this, node->child1());
1964         FPRReg fpr = op1.fpr();
1965         GPRReg gpr = result.gpr();
1966         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1967         
1968         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1969         
1970         int32Result(gpr, node);
1971         return;
1972     }
1973     
1974     case NumberUse:
1975     case NotCellUse: {
1976         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1977         case GeneratedOperandInteger: {
1978             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1979             GPRTemporary result(this, Reuse, op1);
1980             m_jit.move(op1.gpr(), result.gpr());
1981             int32Result(result.gpr(), node, op1.format());
1982             return;
1983         }
1984         case GeneratedOperandJSValue: {
1985             GPRTemporary result(this);
1986 #if USE(JSVALUE64)
1987             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1988
1989             GPRReg gpr = op1.gpr();
1990             GPRReg resultGpr = result.gpr();
1991             FPRTemporary tempFpr(this);
1992             FPRReg fpr = tempFpr.fpr();
1993
1994             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1995             JITCompiler::JumpList converted;
1996
1997             if (node->child1().useKind() == NumberUse) {
1998                 DFG_TYPE_CHECK(
1999                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2000                     m_jit.branchTest64(
2001                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2002             } else {
2003                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2004                 
2005                 DFG_TYPE_CHECK(
2006                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2007                 
2008                 // It's not a cell: so true turns into 1 and all else turns into 0.
2009                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2010                 converted.append(m_jit.jump());
2011                 
2012                 isNumber.link(&m_jit);
2013             }
2014
2015             // First, if we get here we have a double encoded as a JSValue
2016             m_jit.move(gpr, resultGpr);
2017             unboxDouble(resultGpr, fpr);
2018
2019             silentSpillAllRegisters(resultGpr);
2020             callOperation(toInt32, resultGpr, fpr);
2021             silentFillAllRegisters(resultGpr);
2022
2023             converted.append(m_jit.jump());
2024
2025             isInteger.link(&m_jit);
2026             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2027
2028             converted.link(&m_jit);
2029 #else
2030             Node* childNode = node->child1().node();
2031             VirtualRegister virtualRegister = childNode->virtualRegister();
2032             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2033
2034             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2035
2036             GPRReg payloadGPR = op1.payloadGPR();
2037             GPRReg resultGpr = result.gpr();
2038         
2039             JITCompiler::JumpList converted;
2040
2041             if (info.registerFormat() == DataFormatJSInt32)
2042                 m_jit.move(payloadGPR, resultGpr);
2043             else {
2044                 GPRReg tagGPR = op1.tagGPR();
2045                 FPRTemporary tempFpr(this);
2046                 FPRReg fpr = tempFpr.fpr();
2047                 FPRTemporary scratch(this);
2048
2049                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2050
2051                 if (node->child1().useKind() == NumberUse) {
2052                     DFG_TYPE_CHECK(
2053                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2054                         m_jit.branch32(
2055                             MacroAssembler::AboveOrEqual, tagGPR,
2056                             TrustedImm32(JSValue::LowestTag)));
2057                 } else {
2058                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2059                     
2060                     DFG_TYPE_CHECK(
2061                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2062                         m_jit.branchIfCell(op1.jsValueRegs()));
2063                     
2064                     // It's not a cell: so true turns into 1 and all else turns into 0.
2065                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2066                     m_jit.move(TrustedImm32(0), resultGpr);
2067                     converted.append(m_jit.jump());
2068                     
2069                     isBoolean.link(&m_jit);
2070                     m_jit.move(payloadGPR, resultGpr);
2071                     converted.append(m_jit.jump());
2072                     
2073                     isNumber.link(&m_jit);
2074                 }
2075
2076                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2077
2078                 silentSpillAllRegisters(resultGpr);
2079                 callOperation(toInt32, resultGpr, fpr);
2080                 silentFillAllRegisters(resultGpr);
2081
2082                 converted.append(m_jit.jump());
2083
2084                 isInteger.link(&m_jit);
2085                 m_jit.move(payloadGPR, resultGpr);
2086
2087                 converted.link(&m_jit);
2088             }
2089 #endif
2090             int32Result(resultGpr, node);
2091             return;
2092         }
2093         case GeneratedOperandTypeUnknown:
2094             RELEASE_ASSERT(!m_compileOkay);
2095             return;
2096         }
2097         RELEASE_ASSERT_NOT_REACHED();
2098         return;
2099     }
2100     
2101     default:
2102         ASSERT(!m_compileOkay);
2103         return;
2104     }
2105 }
2106
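// Converts a value produced as an unsigned 32-bit integer into a JS number. If the node
// is allowed to overflow int32 it always produces a double, adding 2^32 when the sign bit
// is set; otherwise it speculates that the value is non-negative and keeps it as int32.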
2107 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2108 {
2109     if (doesOverflow(node->arithMode())) {
2110         // We know that this sometimes produces doubles. So produce a double every
2111         // time. This at least allows subsequent code to not have weird conditionals.
2112             
2113         SpeculateInt32Operand op1(this, node->child1());
2114         FPRTemporary result(this);
2115             
2116         GPRReg inputGPR = op1.gpr();
2117         FPRReg outputFPR = result.fpr();
2118             
2119         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2120             
2121         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2122         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2123         positive.link(&m_jit);
2124             
2125         doubleResult(outputFPR, node);
2126         return;
2127     }
2128     
2129     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2130
2131     SpeculateInt32Operand op1(this, node->child1());
2132     GPRTemporary result(this);
2133
2134     m_jit.move(op1.gpr(), result.gpr());
2135
2136     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2137
2138     int32Result(result.gpr(), node, op1.format());
2139 }
2140
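// Converts a double to an int32 with an overflow check (and a negative-zero check when
// the arith mode requires it); failure triggers an OSR exit.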
2141 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2142 {
2143     SpeculateDoubleOperand op1(this, node->child1());
2144     FPRTemporary scratch(this);
2145     GPRTemporary result(this);
2146     
2147     FPRReg valueFPR = op1.fpr();
2148     FPRReg scratchFPR = scratch.fpr();
2149     GPRReg resultGPR = result.gpr();
2150
2151     JITCompiler::JumpList failureCases;
2152     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2153     m_jit.branchConvertDoubleToInt32(
2154         valueFPR, resultGPR, failureCases, scratchFPR,
2155         shouldCheckNegativeZero(node->arithMode()));
2156     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2157
2158     int32Result(resultGPR, node);
2159 }
2160
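// Produces a DoubleRep (unboxed double) from the child. RealNumberUse unboxes directly
// with an int32 fallback; NumberUse/NotCellUse handle boxed ints, doubles and (for
// NotCellUse) undefined, null and booleans; Int52RepUse converts from int64.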
2161 void SpeculativeJIT::compileDoubleRep(Node* node)
2162 {
2163     switch (node->child1().useKind()) {
2164     case RealNumberUse: {
2165         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2166         FPRTemporary result(this);
2167         
2168         JSValueRegs op1Regs = op1.jsValueRegs();
2169         FPRReg resultFPR = result.fpr();
2170         
2171 #if USE(JSVALUE64)
2172         GPRTemporary temp(this);
2173         GPRReg tempGPR = temp.gpr();
2174         m_jit.move(op1Regs.gpr(), tempGPR);
2175         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2176 #else
2177         FPRTemporary temp(this);
2178         FPRReg tempFPR = temp.fpr();
2179         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2180 #endif
2181         
2182         JITCompiler::Jump done = m_jit.branchDouble(
2183             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2184         
2185         DFG_TYPE_CHECK(
2186             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2187         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2188         
2189         done.link(&m_jit);
2190         
2191         doubleResult(resultFPR, node);
2192         return;
2193     }
2194     
2195     case NotCellUse:
2196     case NumberUse: {
2197         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2198
2199         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2200         if (isInt32Speculation(possibleTypes)) {
2201             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2202             FPRTemporary result(this);
2203             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2204             doubleResult(result.fpr(), node);
2205             return;
2206         }
2207
2208         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2209         FPRTemporary result(this);
2210
2211 #if USE(JSVALUE64)
2212         GPRTemporary temp(this);
2213
2214         GPRReg op1GPR = op1.gpr();
2215         GPRReg tempGPR = temp.gpr();
2216         FPRReg resultFPR = result.fpr();
2217         JITCompiler::JumpList done;
2218
2219         JITCompiler::Jump isInteger = m_jit.branch64(
2220             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2221
2222         if (node->child1().useKind() == NotCellUse) {
2223             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2224             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2225
2226             static const double zero = 0;
2227             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2228
2229             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2230             done.append(isNull);
2231
2232             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2233                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2234
2235             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2236             static const double one = 1;
2237             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2238             done.append(m_jit.jump());
2239             done.append(isFalse);
2240
2241             isUndefined.link(&m_jit);
2242             static const double NaN = PNaN;
2243             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2244             done.append(m_jit.jump());
2245
2246             isNumber.link(&m_jit);
2247         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2248             typeCheck(
2249                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2250                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2251         }
2252     
2253         m_jit.move(op1GPR, tempGPR);
2254         unboxDouble(tempGPR, resultFPR);
2255         done.append(m_jit.jump());
2256     
2257         isInteger.link(&m_jit);
2258         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2259         done.link(&m_jit);
2260 #else // USE(JSVALUE64) -> this is the 32_64 case
2261         FPRTemporary temp(this);
2262     
2263         GPRReg op1TagGPR = op1.tagGPR();
2264         GPRReg op1PayloadGPR = op1.payloadGPR();
2265         FPRReg tempFPR = temp.fpr();
2266         FPRReg resultFPR = result.fpr();
2267         JITCompiler::JumpList done;
2268     
2269         JITCompiler::Jump isInteger = m_jit.branch32(
2270             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2271
2272         if (node->child1().useKind() == NotCellUse) {
2273             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2274             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2275
2276             static const double zero = 0;
2277             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2278
2279             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2280             done.append(isNull);
2281
2282             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2283
2284             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2285             static const double one = 1;
2286             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2287             done.append(m_jit.jump());
2288             done.append(isFalse);
2289
2290             isUndefined.link(&m_jit);
2291             static const double NaN = PNaN;
2292             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2293             done.append(m_jit.jump());
2294
2295             isNumber.link(&m_jit);
2296         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2297             typeCheck(
2298                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2299                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2300         }
2301
2302         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2303         done.append(m_jit.jump());
2304     
2305         isInteger.link(&m_jit);
2306         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2307         done.link(&m_jit);
2308 #endif // USE(JSVALUE64)
2309     
2310         doubleResult(resultFPR, node);
2311         return;
2312     }
2313         
2314 #if USE(JSVALUE64)
2315     case Int52RepUse: {
2316         SpeculateStrictInt52Operand value(this, node->child1());
2317         FPRTemporary result(this);
2318         
2319         GPRReg valueGPR = value.gpr();
2320         FPRReg resultFPR = result.fpr();
2321
2322         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2323         
2324         doubleResult(resultFPR, node);
2325         return;
2326     }
2327 #endif // USE(JSVALUE64)
2328         
2329     default:
2330         RELEASE_ASSERT_NOT_REACHED();
2331         return;
2332     }
2333 }
2334
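// Boxes an unboxed representation back into a JSValue: doubles are purified of impure NaN
// when necessary before boxing, and Int52 values are boxed via boxInt52.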
2335 void SpeculativeJIT::compileValueRep(Node* node)
2336 {
2337     switch (node->child1().useKind()) {
2338     case DoubleRepUse: {
2339         SpeculateDoubleOperand value(this, node->child1());
2340         JSValueRegsTemporary result(this);
2341         
2342         FPRReg valueFPR = value.fpr();
2343         JSValueRegs resultRegs = result.regs();
2344         
2345         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2346         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2347         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2348         // local was purified.
2349         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2350             m_jit.purifyNaN(valueFPR);
2351
2352         boxDouble(valueFPR, resultRegs);
2353         
2354         jsValueResult(resultRegs, node);
2355         return;
2356     }
2357         
2358 #if USE(JSVALUE64)
2359     case Int52RepUse: {
2360         SpeculateStrictInt52Operand value(this, node->child1());
2361         GPRTemporary result(this);
2362         
2363         GPRReg valueGPR = value.gpr();
2364         GPRReg resultGPR = result.gpr();
2365         
2366         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2367         
2368         jsValueResult(resultGPR, node);
2369         return;
2370     }
2371 #endif // USE(JSVALUE64)
2372         
2373     default:
2374         RELEASE_ASSERT_NOT_REACHED();
2375         return;
2376     }
2377 }
2378
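// Rounds and clamps a double to the 0..255 range used for clamped byte stores; the
// !(d > 0) test also sends NaN to zero.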
2379 static double clampDoubleToByte(double d)
2380 {
2381     d += 0.5;
2382     if (!(d > 0))
2383         d = 0;
2384     else if (d > 255)
2385         d = 255;
2386     return d;
2387 }
2388
2389 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2390 {
2391     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2392     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2393     jit.xorPtr(result, result);
2394     MacroAssembler::Jump clamped = jit.jump();
2395     tooBig.link(&jit);
2396     jit.move(JITCompiler::TrustedImm32(255), result);
2397     clamped.link(&jit);
2398     inBounds.link(&jit);
2399 }
2400
2401 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2402 {
2403     // Unordered compare so we pick up NaN
2404     static const double zero = 0;
2405     static const double byteMax = 255;
2406     static const double half = 0.5;
2407     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2408     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2409     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2410     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2411     
2412     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2413     // FIXME: This should probably just use a floating point round!
2414     // https://bugs.webkit.org/show_bug.cgi?id=72054
2415     jit.addDouble(source, scratch);
2416     jit.truncateDoubleToInt32(scratch, result);   
2417     MacroAssembler::Jump truncatedInt = jit.jump();
2418     
2419     tooSmall.link(&jit);
2420     jit.xorPtr(result, result);
2421     MacroAssembler::Jump zeroed = jit.jump();
2422     
2423     tooBig.link(&jit);
2424     jit.move(JITCompiler::TrustedImm32(255), result);
2425     
2426     truncatedInt.link(&jit);
2427     zeroed.link(&jit);
2428
2429 }
2430
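// Returns the jump to take when a typed-array access is out of bounds, or an unset jump
// when the access is statically known to be safe (PutByValAlias, or a foldable view with
// a constant in-range index).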
2431 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2432 {
2433     if (node->op() == PutByValAlias)
2434         return JITCompiler::Jump();
2435     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2436         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2437     if (view) {
2438         uint32_t length = view->length();
2439         Node* indexNode = m_jit.graph().child(node, 1).node();
2440         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2441             return JITCompiler::Jump();
2442         return m_jit.branch32(
2443             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2444     }
2445     return m_jit.branch32(
2446         MacroAssembler::AboveOrEqual, indexGPR,
2447         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2448 }
2449
2450 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2451 {
2452     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2453     if (!jump.isSet())
2454         return;
2455     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2456 }
2457
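// Indexed load from an integer typed array: bounds-check, then a load sized and
// sign-extended per the element type. Unsigned 32-bit loads may need to be widened to
// Int52 or double when the value cannot be speculated to fit in int32.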
2458 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2459 {
2460     ASSERT(isInt(type));
2461     
2462     SpeculateCellOperand base(this, node->child1());
2463     SpeculateStrictInt32Operand property(this, node->child2());
2464     StorageOperand storage(this, node->child3());
2465
2466     GPRReg baseReg = base.gpr();
2467     GPRReg propertyReg = property.gpr();
2468     GPRReg storageReg = storage.gpr();
2469
2470     GPRTemporary result(this);
2471     GPRReg resultReg = result.gpr();
2472
2473     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2474
2475     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2476     switch (elementSize(type)) {
2477     case 1:
2478         if (isSigned(type))
2479             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2480         else
2481             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2482         break;
2483     case 2:
2484         if (isSigned(type))
2485             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2486         else
2487             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2488         break;
2489     case 4:
2490         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2491         break;
2492     default:
2493         CRASH();
2494     }
2495     if (elementSize(type) < 4 || isSigned(type)) {
2496         int32Result(resultReg, node);
2497         return;
2498     }
2499     
2500     ASSERT(elementSize(type) == 4 && !isSigned(type));
2501     if (node->shouldSpeculateInt32()) {
2502         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2503         int32Result(resultReg, node);
2504         return;
2505     }
2506     
2507 #if USE(JSVALUE64)
2508     if (node->shouldSpeculateMachineInt()) {
2509         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2510         strictInt52Result(resultReg, node);
2511         return;
2512     }
2513 #endif
2514     
2515     FPRTemporary fresult(this);
2516     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2517     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2518     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2519     positive.link(&m_jit);
2520     doubleResult(fresult.fpr(), node);
2521 }
2522
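// Indexed store into an integer typed array. The value may arrive as a constant, Int32,
// Int52 or double and is converted (and clamped for clamped byte arrays) before the
// bounds check and the appropriately sized store.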
2523 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2524 {
2525     ASSERT(isInt(type));
2526     
2527     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2528     GPRReg storageReg = storage.gpr();
2529     
2530     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2531     
2532     GPRTemporary value;
2533     GPRReg valueGPR = InvalidGPRReg;
2534     
2535     if (valueUse->isConstant()) {
2536         JSValue jsValue = valueUse->asJSValue();
2537         if (!jsValue.isNumber()) {
2538             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2539             noResult(node);
2540             return;
2541         }
2542         double d = jsValue.asNumber();
2543         if (isClamped(type)) {
2544             ASSERT(elementSize(type) == 1);
2545             d = clampDoubleToByte(d);
2546         }
2547         GPRTemporary scratch(this);
2548         GPRReg scratchReg = scratch.gpr();
2549         m_jit.move(Imm32(toInt32(d)), scratchReg);
2550         value.adopt(scratch);
2551         valueGPR = scratchReg;
2552     } else {
2553         switch (valueUse.useKind()) {
2554         case Int32Use: {
2555             SpeculateInt32Operand valueOp(this, valueUse);
2556             GPRTemporary scratch(this);
2557             GPRReg scratchReg = scratch.gpr();
2558             m_jit.move(valueOp.gpr(), scratchReg);
2559             if (isClamped(type)) {
2560                 ASSERT(elementSize(type) == 1);
2561                 compileClampIntegerToByte(m_jit, scratchReg);
2562             }
2563             value.adopt(scratch);
2564             valueGPR = scratchReg;
2565             break;
2566         }
2567             
2568 #if USE(JSVALUE64)
2569         case Int52RepUse: {
2570             SpeculateStrictInt52Operand valueOp(this, valueUse);
2571             GPRTemporary scratch(this);
2572             GPRReg scratchReg = scratch.gpr();
2573             m_jit.move(valueOp.gpr(), scratchReg);
2574             if (isClamped(type)) {
2575                 ASSERT(elementSize(type) == 1);
2576                 MacroAssembler::Jump inBounds = m_jit.branch64(
2577                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2578                 MacroAssembler::Jump tooBig = m_jit.branch64(
2579                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2580                 m_jit.move(TrustedImm32(0), scratchReg);
2581                 MacroAssembler::Jump clamped = m_jit.jump();
2582                 tooBig.link(&m_jit);
2583                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2584                 clamped.link(&m_jit);
2585                 inBounds.link(&m_jit);
2586             }
2587             value.adopt(scratch);
2588             valueGPR = scratchReg;
2589             break;
2590         }
2591 #endif // USE(JSVALUE64)
2592             
2593         case DoubleRepUse: {
2594             if (isClamped(type)) {
2595                 ASSERT(elementSize(type) == 1);
2596                 SpeculateDoubleOperand valueOp(this, valueUse);
2597                 GPRTemporary result(this);
2598                 FPRTemporary floatScratch(this);
2599                 FPRReg fpr = valueOp.fpr();
2600                 GPRReg gpr = result.gpr();
2601                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2602                 value.adopt(result);
2603                 valueGPR = gpr;
2604             } else {
2605                 SpeculateDoubleOperand valueOp(this, valueUse);
2606                 GPRTemporary result(this);
2607                 FPRReg fpr = valueOp.fpr();
2608                 GPRReg gpr = result.gpr();
2609                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2610                 m_jit.xorPtr(gpr, gpr);
2611                 MacroAssembler::Jump fixed = m_jit.jump();
2612                 notNaN.link(&m_jit);
2613                 
2614                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2615                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2616                 
2617                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2618                 
2619                 fixed.link(&m_jit);
2620                 value.adopt(result);
2621                 valueGPR = gpr;
2622             }
2623             break;
2624         }
2625             
2626         default:
2627             RELEASE_ASSERT_NOT_REACHED();
2628             break;
2629         }
2630     }
2631     
2632     ASSERT_UNUSED(valueGPR, valueGPR != property);
2633     ASSERT(valueGPR != base);
2634     ASSERT(valueGPR != storageReg);
2635     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2636     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2637         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2638         outOfBounds = MacroAssembler::Jump();
2639     }
2640
2641     switch (elementSize(type)) {
2642     case 1:
2643         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2644         break;
2645     case 2:
2646         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2647         break;
2648     case 4:
2649         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2650         break;
2651     default:
2652         CRASH();
2653     }
2654     if (outOfBounds.isSet())
2655         outOfBounds.link(&m_jit);
2656     noResult(node);
2657 }
2658
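// Indexed load from a float typed array: bounds-check, then load either a float
// (converted to double) or a double, depending on the element size.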
2659 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2660 {
2661     ASSERT(isFloat(type));
2662     
2663     SpeculateCellOperand base(this, node->child1());
2664     SpeculateStrictInt32Operand property(this, node->child2());
2665     StorageOperand storage(this, node->child3());
2666
2667     GPRReg baseReg = base.gpr();
2668     GPRReg propertyReg = property.gpr();
2669     GPRReg storageReg = storage.gpr();
2670
2671     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2672
2673     FPRTemporary result(this);
2674     FPRReg resultReg = result.fpr();
2675     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2676     switch (elementSize(type)) {
2677     case 4:
2678         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2679         m_jit.convertFloatToDouble(resultReg, resultReg);
2680         break;
2681     case 8: {
2682         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2683         break;
2684     }
2685     default:
2686         RELEASE_ASSERT_NOT_REACHED();
2687     }
2688     
2689     doubleResult(resultReg, node);
2690 }
2691
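// Indexed store into a float typed array: bounds-check, then store the double either as a
// converted single-precision float or as a full double, depending on the element size.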
2692 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2693 {
2694     ASSERT(isFloat(type));
2695     
2696     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2697     GPRReg storageReg = storage.gpr();
2698     
2699     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2700     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2701
2702     SpeculateDoubleOperand valueOp(this, valueUse);
2703     FPRTemporary scratch(this);
2704     FPRReg valueFPR = valueOp.fpr();
2705     FPRReg scratchFPR = scratch.fpr();
2706
2707     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2708     
2709     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2710     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2711         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2712         outOfBounds = MacroAssembler::Jump();
2713     }
2714     
2715     switch (elementSize(type)) {
2716     case 4: {
2717         m_jit.moveDouble(valueFPR, scratchFPR);
2718         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2719         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2720         break;
2721     }
2722     case 8:
2723         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2724         break;
2725     default:
2726         RELEASE_ASSERT_NOT_REACHED();
2727     }
2728     if (outOfBounds.isSet())
2729         outOfBounds.link(&m_jit);
2730     noResult(node);
2731 }
2732
2733 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2734 {
2735     // Check that prototype is an object.
2736     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2737     
2738     // Initialize scratchReg with the value being checked.
2739     m_jit.move(valueReg, scratchReg);
2740     
2741     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2742     MacroAssembler::Label loop(&m_jit);
2743     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2744     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2745     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2746 #if USE(JSVALUE64)
2747     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2748 #else
2749     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2750 #endif
2751     
2752     // No match - result is false.
2753 #if USE(JSVALUE64)
2754     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2755 #else
2756     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2757 #endif
2758     MacroAssembler::Jump putResult = m_jit.jump();
2759     
2760     isInstance.link(&m_jit);
2761 #if USE(JSVALUE64)
2762     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2763 #else
2764     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2765 #endif
2766     
2767     putResult.link(&m_jit);
2768 }
2769
2770 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2771 {
2772     SpeculateCellOperand base(this, node->child1());
2773
2774     GPRReg baseGPR = base.gpr();
2775
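         // OSR exit unless at least one of the requested TypeInfo flag bits is set on the
         // base cell: the branch below is taken when (typeInfoFlags & typeInfoOperand) is zero.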
2776     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2777
2778     noResult(node);
2779 }
2780
2781 void SpeculativeJIT::compileInstanceOf(Node* node)
2782 {
2783     if (node->child1().useKind() == UntypedUse) {
2784         // It might not be a cell. Speculate less aggressively.
2785         // Or: it might only be used once (i.e. by us), so we get zero benefit
2786         // from speculating any more aggressively than we absolutely need to.
2787         
2788         JSValueOperand value(this, node->child1());
2789         SpeculateCellOperand prototype(this, node->child2());
2790         GPRTemporary scratch(this);
2791         GPRTemporary scratch2(this);
2792         
2793         GPRReg prototypeReg = prototype.gpr();
2794         GPRReg scratchReg = scratch.gpr();
2795         GPRReg scratch2Reg = scratch2.gpr();
2796         
2797         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2798         GPRReg valueReg = value.jsValueRegs().payloadGPR();
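             // Fall-through: the left-hand side is not a cell, and a primitive is never an
             // instance of anything (e.g. (5 instanceof Object) === false), so the result is
             // simply false.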
2799         moveFalseTo(scratchReg);
2800
2801         MacroAssembler::Jump done = m_jit.jump();
2802         
2803         isCell.link(&m_jit);
2804         
2805         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2806         
2807         done.link(&m_jit);
2808
2809         blessedBooleanResult(scratchReg, node);
2810         return;
2811     }
2812     
2813     SpeculateCellOperand value(this, node->child1());
2814     SpeculateCellOperand prototype(this, node->child2());
2815     
2816     GPRTemporary scratch(this);
2817     GPRTemporary scratch2(this);
2818     
2819     GPRReg valueReg = value.gpr();
2820     GPRReg prototypeReg = prototype.gpr();
2821     GPRReg scratchReg = scratch.gpr();
2822     GPRReg scratch2Reg = scratch2.gpr();
2823     
2824     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2825
2826     blessedBooleanResult(scratchReg, node);
2827 }
2828
2829 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2830 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2831 {
2832     Edge& leftChild = node->child1();
2833     Edge& rightChild = node->child2();
2834
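         // If either operand is statically known not to be a number, the inline fast path
         // below could never take it, so go straight to the generic operation, which performs
         // the full ToInt32 conversion (e.g. for "3" & 1 or {} | 0).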
2835     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2836         JSValueOperand left(this, leftChild);
2837         JSValueOperand right(this, rightChild);
2838         JSValueRegs leftRegs = left.jsValueRegs();
2839         JSValueRegs rightRegs = right.jsValueRegs();
2840 #if USE(JSVALUE64)
2841         GPRTemporary result(this);
2842         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2843 #else
2844         GPRTemporary resultTag(this);
2845         GPRTemporary resultPayload(this);
2846         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2847 #endif
2848         flushRegisters();
2849         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2850         m_jit.exceptionCheck();
2851
2852         jsValueResult(resultRegs, node);
2853         return;
2854     }
2855
2856     Optional<JSValueOperand> left;
2857     Optional<JSValueOperand> right;
2858
2859     JSValueRegs leftRegs;
2860     JSValueRegs rightRegs;
2861
2862 #if USE(JSVALUE64)
2863     GPRTemporary result(this);
2864     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2865     GPRTemporary scratch(this);
2866     GPRReg scratchGPR = scratch.gpr();
2867 #else
2868     GPRTemporary resultTag(this);
2869     GPRTemporary resultPayload(this);
2870     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2871     GPRReg scratchGPR = resultTag.gpr();
2872 #endif
2873
2874     SnippetOperand leftOperand;
2875     SnippetOperand rightOperand;
2876
2877     // The snippet generator does not support both operands being constant. If the left
2878     // operand is already const, we'll ignore the right operand's constness.
2879     if (leftChild->isInt32Constant())
2880         leftOperand.setConstInt32(leftChild->asInt32());
2881     else if (rightChild->isInt32Constant())
2882         rightOperand.setConstInt32(rightChild->asInt32());
2883
2884     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2885
2886     if (!leftOperand.isConst()) {
2887         left = JSValueOperand(this, leftChild);
2888         leftRegs = left->jsValueRegs();
2889     }
2890     if (!rightOperand.isConst()) {
2891         right = JSValueOperand(this, rightChild);
2892         rightRegs = right->jsValueRegs();
2893     }
2894
2895     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
2896     gen.generateFastPath(m_jit);
2897
2898     ASSERT(gen.didEmitFastPath());
2899     gen.endJumpList().append(m_jit.jump());
2900
2901     gen.slowPathJumpList().link(&m_jit);
2902     silentSpillAllRegisters(resultRegs);
2903
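         // A constant operand was never loaded into a register for the fast path, so
         // materialize its JSValue now for the call, borrowing the result registers (which
         // are dead until the operation returns).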
2904     if (leftOperand.isConst()) {
2905         leftRegs = resultRegs;
2906         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
2907     } else if (rightOperand.isConst()) {
2908         rightRegs = resultRegs;
2909         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
2910     }
2911
2912     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2913
2914     silentFillAllRegisters(resultRegs);
2915     m_jit.exceptionCheck();
2916
2917     gen.endJumpList().link(&m_jit);
2918     jsValueResult(resultRegs, node);
2919 }
2920
2921 void SpeculativeJIT::compileBitwiseOp(Node* node)
2922 {
2923     NodeType op = node->op();
2924     Edge& leftChild = node->child1();
2925     Edge& rightChild = node->child2();
2926
2927     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
2928         switch (op) {
2929         case BitAnd:
2930             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
2931             return;
2932         case BitOr:
2933             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
2934             return;
2935         case BitXor:
2936             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
2937             return;
2938         default:
2939             RELEASE_ASSERT_NOT_REACHED();
2940         }
2941     }
2942
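         // Int32 cases: if either operand is a constant (e.g. x & 0xff), fold the immediate
         // into the operation; otherwise operate on two registers, reusing one for the result.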
2943     if (leftChild->isInt32Constant()) {
2944         SpeculateInt32Operand op2(this, rightChild);
2945         GPRTemporary result(this, Reuse, op2);
2946
2947         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
2948
2949         int32Result(result.gpr(), node);
2950
2951     } else if (rightChild->isInt32Constant()) {
2952         SpeculateInt32Operand op1(this, leftChild);
2953         GPRTemporary result(this, Reuse, op1);
2954
2955         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
2956
2957         int32Result(result.gpr(), node);
2958
2959     } else {
2960         SpeculateInt32Operand op1(this, leftChild);
2961         SpeculateInt32Operand op2(this, rightChild);
2962         GPRTemporary result(this, Reuse, op1, op2);
2963         
2964         GPRReg reg1 = op1.gpr();
2965         GPRReg reg2 = op2.gpr();
2966         bitOp(op, reg1, reg2, result.gpr());
2967         
2968         int32Result(result.gpr(), node);
2969     }
2970 }
2971
2972 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
2973 {
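         // BitRShift is the signed JS shift (a >> b) and BitURShift the unsigned one
         // (a >>> b); they share this path and differ only in the slow-path operation and the
         // shift type handed to the snippet generator.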
2974     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
2975         ? operationValueBitRShift : operationValueBitURShift;
2976     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
2977         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
2978
2979     Edge& leftChild = node->child1();
2980     Edge& rightChild = node->child2();
2981
2982     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2983         JSValueOperand left(this, leftChild);
2984         JSValueOperand right(this, rightChild);
2985         JSValueRegs leftRegs = left.jsValueRegs();
2986         JSValueRegs rightRegs = right.jsValueRegs();
2987 #if USE(JSVALUE64)
2988         GPRTemporary result(this);
2989         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2990 #else
2991         GPRTemporary resultTag(this);
2992         GPRTemporary resultPayload(this);
2993         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2994 #endif
2995         flushRegisters();
2996         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2997         m_jit.exceptionCheck();
2998
2999         jsValueResult(resultRegs, node);
3000         return;
3001     }
3002
3003     Optional<JSValueOperand> left;
3004     Optional<JSValueOperand> right;
3005
3006     JSValueRegs leftRegs;
3007     JSValueRegs rightRegs;
3008
3009     FPRTemporary leftNumber(this);
3010     FPRReg leftFPR = leftNumber.fpr();
3011
3012 #if USE(JSVALUE64)
3013     GPRTemporary result(this);
3014     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3015     GPRTemporary scratch(this);
3016     GPRReg scratchGPR = scratch.gpr();
3017     FPRReg scratchFPR = InvalidFPRReg;
3018 #else
3019     GPRTemporary resultTag(this);
3020     GPRTemporary resultPayload(this);
3021     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3022     GPRReg scratchGPR = resultTag.gpr();
3023     FPRTemporary fprScratch(this);
3024     FPRReg scratchFPR = fprScratch.fpr();
3025 #endif
3026
3027     SnippetOperand leftOperand;
3028     SnippetOperand rightOperand;
3029
3030     // The snippet generator does not support both operands being constant. If the left
3031     // operand is already const, we'll ignore the right operand's constness.
3032     if (leftChild->isInt32Constant())
3033         leftOperand.setConstInt32(leftChild->asInt32());
3034     else if (rightChild->isInt32Constant())
3035         rightOperand.setConstInt32(rightChild->asInt32());
3036
3037     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3038
3039     if (!leftOperand.isConst()) {
3040         left = JSValueOperand(this, leftChild);
3041         leftRegs = left->jsValueRegs();
3042     }
3043     if (!rightOperand.isConst()) {
3044         right = JSValueOperand(this, rightChild);
3045         rightRegs = right->jsValueRegs();
3046     }
3047
3048     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3049         leftFPR, scratchGPR, scratchFPR, shiftType);
3050     gen.generateFastPath(m_jit);
3051
3052     ASSERT(gen.didEmitFastPath());
3053     gen.endJumpList().append(m_jit.jump());
3054
3055     gen.slowPathJumpList().link(&m_jit);
3056     silentSpillAllRegisters(resultRegs);
3057
3058     if (leftOperand.isConst()) {
3059         leftRegs = resultRegs;
3060         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3061     } else if (rightOperand.isConst()) {
3062         rightRegs = resultRegs;
3063         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3064     }
3065
3066     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3067
3068     silentFillAllRegisters(resultRegs);
3069     m_jit.exceptionCheck();
3070
3071     gen.endJumpList().link(&m_jit);
3072     jsValueResult(resultRegs, node);
3073     return;
3074 }
3075
3076 void SpeculativeJIT::compileShiftOp(Node* node)
3077 {
3078     NodeType op = node->op();
3079     Edge& leftChild = node->child1();
3080     Edge& rightChild = node->child2();
3081
3082     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3083         switch (op) {
3084         case BitLShift:
3085             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3086             return;
3087         case BitRShift:
3088         case BitURShift:
3089             emitUntypedRightShiftBitOp(node);
3090             return;
3091         default:
3092             RELEASE_ASSERT_NOT_REACHED();
3093         }
3094     }
3095
3096     if (rightChild->isInt32Constant()) {
3097         SpeculateInt32Operand op1(this, leftChild);
3098         GPRTemporary result(this, Reuse, op1);
3099
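             // JS shift counts are taken modulo 32 (e.g. (1 << 33) === 2), so only the low
             // five bits of the constant matter.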
3100         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3101
3102         int32Result(result.gpr(), node);
3103     } else {
3104         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3105         SpeculateInt32Operand op1(this, leftChild);
3106         SpeculateInt32Operand op2(this, rightChild);
3107         GPRTemporary result(this, Reuse, op1);
3108
3109         GPRReg reg1 = op1.gpr();
3110         GPRReg reg2 = op2.gpr();
3111         shiftOp(op, reg1, reg2, result.gpr());
3112
3113         int32Result(result.gpr(), node);
3114     }
3115 }
3116
3117 void SpeculativeJIT::compileValueAdd(Node* node)
3118 {
3119     Edge& leftChild = node->child1();
3120     Edge& rightChild = node->child2();
3121
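         // If either operand is statically known not to be a number, the arithmetic fast path
         // can never succeed: the add either needs ToPrimitive/ToNumber or is a string
         // concatenation (e.g. "x" + 1), so call the not-number flavor of the operation
         // directly.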
3122     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3123         JSValueOperand left(this, leftChild);
3124         JSValueOperand right(this, rightChild);
3125         JSValueRegs leftRegs = left.jsValueRegs();
3126         JSValueRegs rightRegs = right.jsValueRegs();
3127 #if USE(JSVALUE64)
3128         GPRTemporary result(this);
3129         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3130 #else
3131         GPRTemporary resultTag(this);
3132         GPRTemporary resultPayload(this);
3133         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3134 #endif
3135         flushRegisters();
3136         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3137         m_jit.exceptionCheck();
3138     
3139         jsValueResult(resultRegs, node);
3140         return;
3141     }
3142
3143     Optional<JSValueOperand> left;
3144     Optional<JSValueOperand> right;
3145
3146     JSValueRegs leftRegs;
3147     JSValueRegs rightRegs;
3148
3149     FPRTemporary leftNumber(this);
3150     FPRTemporary rightNumber(this);
3151     FPRReg leftFPR = leftNumber.fpr();
3152     FPRReg rightFPR = rightNumber.fpr();
3153
3154 #if USE(JSVALUE64)
3155     GPRTemporary result(this);
3156     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3157     GPRTemporary scratch(this);
3158     GPRReg scratchGPR = scratch.gpr();
3159     FPRReg scratchFPR = InvalidFPRReg;
3160 #else
3161     GPRTemporary resultTag(this);
3162     GPRTemporary resultPayload(this);
3163     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3164     GPRReg scratchGPR = resultTag.gpr();
3165     FPRTemporary fprScratch(this);
3166     FPRReg scratchFPR = fprScratch.fpr();
3167 #endif
3168
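         // The operands carry the abstract interpreter's result types, which lets the snippet
         // generator elide type checks that those types already rule out.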
3169     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3170     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3171
3172     // The snippet generator does not support both operands being constant. If the left
3173     // operand is already const, we'll ignore the right operand's constness.
3174     if (leftChild->isInt32Constant())
3175         leftOperand.setConstInt32(leftChild->asInt32());
3176     else if (rightChild->isInt32Constant())
3177         rightOperand.setConstInt32(rightChild->asInt32());
3178
3179     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3180
3181     if (!leftOperand.isConst()) {
3182         left = JSValueOperand(this, leftChild);
3183         leftRegs = left->jsValueRegs();
3184     }
3185     if (!rightOperand.isConst()) {
3186         right = JSValueOperand(this, rightChild);
3187         rightRegs = right->jsValueRegs();
3188     }
3189
3190     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3191         leftFPR, rightFPR, scratchGPR, scratchFPR);
3192     gen.generateFastPath(m_jit);
3193
3194     ASSERT(gen.didEmitFastPath());
3195     gen.endJumpList().append(m_jit.jump());
3196
3197     gen.slowPathJumpList().link(&m_jit);
3198
3199     silentSpillAllRegisters(resultRegs);
3200
3201     if (leftOperand.isConst()) {
3202         leftRegs = resultRegs;
3203         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3204     } else if (rightOperand.isConst()) {
3205         rightRegs = resultRegs;
3206         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3207     }
3208
3209     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3210
3211     silentFillAllRegisters(resultRegs);
3212     m_jit.exceptionCheck();
3213
3214     gen.endJumpList().link(&m_jit);
3215     jsValueResult(resultRegs, node);
3216     return;
3217 }
3218
3219 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3220 {
3221     // We could do something smarter here, but this case is currently super rare and,
3222     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
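         // InstanceOfCustom covers the case where the right-hand side may supply its own
         // Symbol.hasInstance, e.g.
         //     class C { static [Symbol.hasInstance](value) { return typeof value === "string"; } }
         //     "foo" instanceof C; // true
         // so we just make the call unconditionally rather than trying to inline anything.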
3223
3224     JSValueOperand value(this, node->child1());
3225     SpeculateCellOperand constructor(this, node->child2());
3226     JSValueOperand hasInstanceValue(this, node->child3());
3227     GPRTemporary result(this);
3228
3229     JSValueRegs valueRegs = value.jsValueRegs();
3230     GPRReg constructorGPR = constructor.gpr();
3231     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3232     GPRReg resultGPR = result.gpr();
3233
3234     MacroAssembler::Jump slowCase = m_jit.jump();
3235
3236     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3237
3238     unblessedBooleanResult(resultGPR, node);
3239 }
3240
3241 void SpeculativeJIT::compileArithAdd(Node* node)
3242 {
3243     switch (node->binaryUseKind()) {
3244     case Int32Use: {
3245         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3246         
3247         if (node->child1()->isInt32Constant()) {
3248             int32_t imm1 = node->child1()->asInt32();
3249             SpeculateInt32Operand op2(this, node->child2());
3250             GPRTemporary result(this);
3251
3252             if (!shouldCheckOverflow(node->arithMode())) {
3253                 m_jit.move(op2.gpr(), result.gpr());
3254                 m_jit.add32(Imm32(imm1), result.gpr());
3255             } else
3256                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
3257
3258             int32Result(result.gpr(), node);
3259             return;
3260         }
3261         
3262         if (node->child2()->isInt32Constant()) {
3263             SpeculateInt32Operand op1(this, node->child1());
3264             int32_t imm2 = node->child2()->asInt32();
3265             GPRTemporary result(this);
3266                 
3267             if (!shouldCheckOverflow(node->arithMode())) {
3268                 m_jit.move(op1.gpr(), result.gpr());
3269                 m_jit.add32(Imm32(imm2), result.gpr());
3270             } else
3271                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3272
3273             int32Result(result.gpr(), node);
3274             return;
3275         }
3276                 
3277         SpeculateInt32Operand op1(this, node->child1());
3278         SpeculateInt32Operand op2(this, node->child2());
3279         GPRTemporary result(this, Reuse, op1, op2);
3280
3281         GPRReg gpr1 = op1.gpr();
3282         GPRReg gpr2 = op2.gpr();
3283         GPRReg gprResult = result.gpr();
3284
3285         if (!shouldCheckOverflow(node->arithMode())) {
3286             if (gpr1 == gprResult)
3287                 m_jit.add32(gpr2, gprResult);
3288             else {
3289                 m_jit.move(gpr2, gprResult);
3290                 m_jit.add32(gpr1, gprResult);
3291             }
3292         } else {
3293             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3294                 
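                 // On the overflow path branchAdd32 has already clobbered gprResult. If the
                 // result register aliases one of the operands, attach a recovery so the OSR
                 // exit can subtract the other operand back out and reconstruct the original
                 // operand value before exiting.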
3295             if (gpr1 == gprResult)
3296                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3297             else if (gpr2 == gprResult)
3298                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3299             else
3300                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3301         }
3302
3303         int32Result(gprResult, node);
3304         return;
3305     }
3306         
3307 #if USE(JSVALUE64)
3308     case Int52RepUse: {
3309         ASSERT(shouldCheckOverflow(node->arithMode()));
3310         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3311
3312         // Will we need an overflow check? If we can prove that neither input can be
3313         // Int52 then the overflow check will not be necessary.
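             // (SpecInt52 here means the value needs more than 32 bits, so if neither input
             // can be SpecInt52, both fit in int32 and their sum trivially fits in 52 bits.)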
3314         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3315             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3316             SpeculateWhicheverInt52Operand op1(this, node->child1());
3317             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3318             GPRTemporary result(this, Reuse, op1);
3319             m_jit.move(op1.gpr(), result.gpr());
3320             m_jit.add64(op2.gpr(), result.gpr());
3321             int52Result(result.gpr(), node, op1.format());
3322             return;
3323         }
3324         
3325         SpeculateInt52Operand op1(this, node->child1());
3326         SpeculateInt52Operand op2(this, node->child2());
3327         GPRTemporary result(this);
3328         m_jit.move(op1.gpr(), result.gpr());
3329         speculationCheck(
3330             Int52Overflow, JSValueRegs(), 0,
3331             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3332         int52Result(result.gpr(), node);
3333         return;
3334     }
3335 #endif // USE(JSVALUE64)
3336     
3337     case DoubleRepUse: {
3338         SpeculateDoubleOperand op1(this, node->child1());
3339         SpeculateDoubleOperand op2(this, node->child2());
3340         FPRTemporary result(this, op1, op2);
3341
3342         FPRReg reg1 = op1.fpr();
3343         FPRReg reg2 = op2.fpr();