Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (WebKit-https.git, 164f13f87f2d4a674830538c75a46f20019dfb7c)
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMathIC.h"
48 #include "JITMulGenerator.h"
49 #include "JITRightShiftGenerator.h"
50 #include "JITSubGenerator.h"
51 #include "JSCInlines.h"
52 #include "JSEnvironmentRecord.h"
53 #include "JSGeneratorFunction.h"
54 #include "JSLexicalEnvironment.h"
55 #include "LinkBuffer.h"
56 #include "RegExpConstructor.h"
57 #include "ScopedArguments.h"
58 #include "ScratchRegisterAllocator.h"
59 #include "WriteBarrierBuffer.h"
60 #include <wtf/Box.h>
61 #include <wtf/MathExtras.h>
62
63 namespace JSC { namespace DFG {
64
65 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
66     : m_compileOkay(true)
67     , m_jit(jit)
68     , m_currentNode(0)
69     , m_lastGeneratedNode(LastNodeType)
70     , m_indexInBlock(0)
71     , m_generationInfo(m_jit.graph().frameRegisterCount())
72     , m_state(m_jit.graph())
73     , m_interpreter(m_jit.graph(), m_state)
74     , m_stream(&jit.jitCode()->variableEventStream)
75     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
76 {
77 }
78
79 SpeculativeJIT::~SpeculativeJIT()
80 {
81 }
82
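// Inline allocation of a JSFinalObject with the given structure and, when the structure
// has indexed properties, a butterfly sized for vectorLength elements. Allocation failures
// fall into a CallArrayAllocatorSlowPathGenerator that calls operationNewRawObject. For
// double-shaped storage, unused vector slots are pre-filled with PNaN (the hole value).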
83 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
84 {
85     IndexingType indexingType = structure->indexingType();
86     bool hasIndexingHeader = hasIndexedProperties(indexingType);
87
88     unsigned inlineCapacity = structure->inlineCapacity();
89     unsigned outOfLineCapacity = structure->outOfLineCapacity();
90     
91     GPRTemporary scratch(this);
92     GPRTemporary scratch2(this);
93     GPRReg scratchGPR = scratch.gpr();
94     GPRReg scratch2GPR = scratch2.gpr();
95
96     ASSERT(vectorLength >= numElements);
97     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
98     
99     JITCompiler::JumpList slowCases;
100
101     size_t size = 0;
102     if (hasIndexingHeader)
103         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
104     size += outOfLineCapacity * sizeof(JSValue);
105
106     if (size) {
107         slowCases.append(
108             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
109         if (hasIndexingHeader)
110             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
111         else
112             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
113     } else
114         m_jit.move(TrustedImmPtr(0), storageGPR);
115
116     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
117     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
118     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
119     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
120
121     if (hasIndexingHeader)
122         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
123
124     // I want a slow path that also loads out the storage pointer, and that's
125     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
126     // of work for a very small piece of functionality. :-/
127     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
128         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
129         structure, vectorLength));
130
131     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
132 #if USE(JSVALUE64)
133         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
134         for (unsigned i = numElements; i < vectorLength; ++i)
135             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
136 #else
137         EncodedValueDescriptor value;
138         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
139         for (unsigned i = numElements; i < vectorLength; ++i) {
140             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
141             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
142         }
143 #endif
144     }
145     
146     if (hasIndexingHeader)
147         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
148 }
149
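// Loads the argument count of the given (possibly inlined) call frame into lengthGPR.
// For a non-varargs inline frame the count is a compile-time constant; otherwise it is
// read from the frame's argument-count slot, minus one when 'this' is excluded.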
150 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
151 {
152     if (inlineCallFrame && !inlineCallFrame->isVarargs())
153         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
154     else {
155         VirtualRegister argumentCountRegister;
156         if (!inlineCallFrame)
157             argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
158         else
159             argumentCountRegister = inlineCallFrame->argumentCountRegister;
160         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
161         if (!includeThis)
162             m_jit.sub32(TrustedImm32(1), lengthGPR);
163     }
164 }
165
166 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
167 {
168     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
169 }
170
171 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
172 {
173     if (origin.inlineCallFrame) {
174         if (origin.inlineCallFrame->isClosureCall) {
175             m_jit.loadPtr(
176                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
177                 calleeGPR);
178         } else {
179             m_jit.move(
180                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
181                 calleeGPR);
182         }
183     } else
184         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
185 }
186
187 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
188 {
189     m_jit.addPtr(
190         TrustedImm32(
191             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
192         GPRInfo::callFrameRegister, startGPR);
193 }
194
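// OSR exit fuzzing: each guarded check bumps a global runtime counter and, once the counter
// reaches the threshold configured via Options::fireOSRExitFuzzAt()/fireOSRExitFuzzAtOrAfter(),
// this returns a jump that forces the exit to be taken even though the speculation held.
// Returns an unset Jump when fuzzing is disabled.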
195 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
196 {
197     if (!doOSRExitFuzzing())
198         return MacroAssembler::Jump();
199     
200     MacroAssembler::Jump result;
201     
202     m_jit.pushToSave(GPRInfo::regT0);
203     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
204     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
205     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
206     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
207     unsigned at = Options::fireOSRExitFuzzAt();
208     if (at || atOrAfter) {
209         unsigned threshold;
210         MacroAssembler::RelationalCondition condition;
211         if (atOrAfter) {
212             threshold = atOrAfter;
213             condition = MacroAssembler::Below;
214         } else {
215             threshold = at;
216             condition = MacroAssembler::NotEqual;
217         }
218         MacroAssembler::Jump ok = m_jit.branch32(
219             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
220         m_jit.popToRestore(GPRInfo::regT0);
221         result = m_jit.jump();
222         ok.link(&m_jit);
223     }
224     m_jit.popToRestore(GPRInfo::regT0);
225     
226     return result;
227 }
228
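// The speculationCheck() family records an OSR exit: the failing jump(s) are attached to a
// new exit-info entry and a matching OSRExit, keyed to the current variable event stream
// position, is appended to the JIT code. When exit fuzzing is active, the fuzz jump is
// folded into the same exit.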
229 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
230 {
231     if (!m_compileOkay)
232         return;
233     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
234     if (fuzzJump.isSet()) {
235         JITCompiler::JumpList jumpsToFail;
236         jumpsToFail.append(fuzzJump);
237         jumpsToFail.append(jumpToFail);
238         m_jit.appendExitInfo(jumpsToFail);
239     } else
240         m_jit.appendExitInfo(jumpToFail);
241     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
245 {
246     if (!m_compileOkay)
247         return;
248     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
249     if (fuzzJump.isSet()) {
250         JITCompiler::JumpList myJumpsToFail;
251         myJumpsToFail.append(jumpsToFail);
252         myJumpsToFail.append(fuzzJump);
253         m_jit.appendExitInfo(myJumpsToFail);
254     } else
255         m_jit.appendExitInfo(jumpsToFail);
256     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
257 }
258
259 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
260 {
261     if (!m_compileOkay)
262         return OSRExitJumpPlaceholder();
263     unsigned index = m_jit.jitCode()->osrExit.size();
264     m_jit.appendExitInfo();
265     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
266     return OSRExitJumpPlaceholder(index);
267 }
268
269 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
270 {
271     return speculationCheck(kind, jsValueSource, nodeUse.node());
272 }
273
274 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
275 {
276     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
277 }
278
279 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
280 {
281     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
282 }
283
284 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
285 {
286     if (!m_compileOkay)
287         return;
288     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
289     m_jit.appendExitInfo(jumpToFail);
290     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
291 }
292
293 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
294 {
295     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
296 }
297
298 void SpeculativeJIT::emitInvalidationPoint(Node* node)
299 {
300     if (!m_compileOkay)
301         return;
302     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
303     m_jit.jitCode()->appendOSRExit(OSRExit(
304         UncountableInvalidation, JSValueSource(),
305         m_jit.graph().methodOfGettingAValueProfileFor(node),
306         this, m_stream->size()));
307     info.m_replacementSource = m_jit.watchpointLabel();
308     ASSERT(info.m_replacementSource.isSet());
309     noResult(node);
310 }
311
312 void SpeculativeJIT::unreachable(Node* node)
313 {
314     m_compileOkay = false;
315     m_jit.abortWithReason(DFGUnreachableNode, node->op());
316 }
317
318 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
319 {
320     if (!m_compileOkay)
321         return;
322     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
323     m_compileOkay = false;
324     if (verboseCompilationEnabled())
325         dataLog("Bailing compilation.\n");
326 }
327
328 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
329 {
330     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
331 }
332
333 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
334 {
335     ASSERT(needsTypeCheck(edge, typesPassedThrough));
336     m_interpreter.filter(edge, typesPassedThrough);
337     speculationCheck(exitKind, source, edge.node(), jumpToFail);
338 }
339
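// Returns the registers currently bound to live values, merged with the registers that are
// never available to stubs; inline caches use this set to know what they must preserve.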
340 RegisterSet SpeculativeJIT::usedRegisters()
341 {
342     RegisterSet result;
343     
344     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
345         GPRReg gpr = GPRInfo::toRegister(i);
346         if (m_gprs.isInUse(gpr))
347             result.set(gpr);
348     }
349     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
350         FPRReg fpr = FPRInfo::toRegister(i);
351         if (m_fprs.isInUse(fpr))
352             result.set(fpr);
353     }
354     
355     result.merge(RegisterSet::stubUnavailableRegisters());
356     
357     return result;
358 }
359
360 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
361 {
362     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
363 }
364
365 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
366 {
367     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
368 }
369
370 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
371 {
372     for (auto& slowPathGenerator : m_slowPathGenerators) {
373         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
374         slowPathGenerator->generate(this);
375     }
376     for (auto& slowPathLambda : m_slowPathLambdas) {
377         Node* currentNode = slowPathLambda.currentNode;
378         m_currentNode = currentNode;
379         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
380         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
381         slowPathLambda.generator();
382         m_outOfLineStreamIndex = Nullopt;
383     }
384 }
385
386 void SpeculativeJIT::clearGenerationInfo()
387 {
388     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
389         m_generationInfo[i] = GenerationInfo();
390     m_gprs = RegisterBank<GPRInfo>();
391     m_fprs = RegisterBank<FPRInfo>();
392 }
393
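// Silent spill/fill planning: given a value live in 'source' for virtual register 'spillMe',
// pick the cheapest way to save it around a call and to restore it afterwards, based on its
// current data format, its spill format, and whether it is a constant.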
394 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
395 {
396     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
397     Node* node = info.node();
398     DataFormat registerFormat = info.registerFormat();
399     ASSERT(registerFormat != DataFormatNone);
400     ASSERT(registerFormat != DataFormatDouble);
401         
402     SilentSpillAction spillAction;
403     SilentFillAction fillAction;
404         
405     if (!info.needsSpill())
406         spillAction = DoNothingForSpill;
407     else {
408 #if USE(JSVALUE64)
409         ASSERT(info.gpr() == source);
410         if (registerFormat == DataFormatInt32)
411             spillAction = Store32Payload;
412         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
413             spillAction = StorePtr;
414         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
415             spillAction = Store64;
416         else {
417             ASSERT(registerFormat & DataFormatJS);
418             spillAction = Store64;
419         }
420 #elif USE(JSVALUE32_64)
421         if (registerFormat & DataFormatJS) {
422             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
423             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
424         } else {
425             ASSERT(info.gpr() == source);
426             spillAction = Store32Payload;
427         }
428 #endif
429     }
430         
431     if (registerFormat == DataFormatInt32) {
432         ASSERT(info.gpr() == source);
433         ASSERT(isJSInt32(info.registerFormat()));
434         if (node->hasConstant()) {
435             ASSERT(node->isInt32Constant());
436             fillAction = SetInt32Constant;
437         } else
438             fillAction = Load32Payload;
439     } else if (registerFormat == DataFormatBoolean) {
440 #if USE(JSVALUE64)
441         RELEASE_ASSERT_NOT_REACHED();
442 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
443         fillAction = DoNothingForFill;
444 #endif
445 #elif USE(JSVALUE32_64)
446         ASSERT(info.gpr() == source);
447         if (node->hasConstant()) {
448             ASSERT(node->isBooleanConstant());
449             fillAction = SetBooleanConstant;
450         } else
451             fillAction = Load32Payload;
452 #endif
453     } else if (registerFormat == DataFormatCell) {
454         ASSERT(info.gpr() == source);
455         if (node->hasConstant()) {
456             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
457             node->asCell(); // To get the assertion.
458             fillAction = SetCellConstant;
459         } else {
460 #if USE(JSVALUE64)
461             fillAction = LoadPtr;
462 #else
463             fillAction = Load32Payload;
464 #endif
465         }
466     } else if (registerFormat == DataFormatStorage) {
467         ASSERT(info.gpr() == source);
468         fillAction = LoadPtr;
469     } else if (registerFormat == DataFormatInt52) {
470         if (node->hasConstant())
471             fillAction = SetInt52Constant;
472         else if (info.spillFormat() == DataFormatInt52)
473             fillAction = Load64;
474         else if (info.spillFormat() == DataFormatStrictInt52)
475             fillAction = Load64ShiftInt52Left;
476         else if (info.spillFormat() == DataFormatNone)
477             fillAction = Load64;
478         else {
479             RELEASE_ASSERT_NOT_REACHED();
480 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
481             fillAction = Load64; // Make GCC happy.
482 #endif
483         }
484     } else if (registerFormat == DataFormatStrictInt52) {
485         if (node->hasConstant())
486             fillAction = SetStrictInt52Constant;
487         else if (info.spillFormat() == DataFormatInt52)
488             fillAction = Load64ShiftInt52Right;
489         else if (info.spillFormat() == DataFormatStrictInt52)
490             fillAction = Load64;
491         else if (info.spillFormat() == DataFormatNone)
492             fillAction = Load64;
493         else {
494             RELEASE_ASSERT_NOT_REACHED();
495 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
496             fillAction = Load64; // Make GCC happy.
497 #endif
498         }
499     } else {
500         ASSERT(registerFormat & DataFormatJS);
501 #if USE(JSVALUE64)
502         ASSERT(info.gpr() == source);
503         if (node->hasConstant()) {
504             if (node->isCellConstant())
505                 fillAction = SetTrustedJSConstant;
506             else
507                 fillAction = SetJSConstant;
508         } else if (info.spillFormat() == DataFormatInt32) {
509             ASSERT(registerFormat == DataFormatJSInt32);
510             fillAction = Load32PayloadBoxInt;
511         } else
512             fillAction = Load64;
513 #else
514         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
515         if (node->hasConstant())
516             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
517         else if (info.payloadGPR() == source)
518             fillAction = Load32Payload;
519         else { // Fill the Tag
520             switch (info.spillFormat()) {
521             case DataFormatInt32:
522                 ASSERT(registerFormat == DataFormatJSInt32);
523                 fillAction = SetInt32Tag;
524                 break;
525             case DataFormatCell:
526                 ASSERT(registerFormat == DataFormatJSCell);
527                 fillAction = SetCellTag;
528                 break;
529             case DataFormatBoolean:
530                 ASSERT(registerFormat == DataFormatJSBoolean);
531                 fillAction = SetBooleanTag;
532                 break;
533             default:
534                 fillAction = Load32Tag;
535                 break;
536             }
537         }
538 #endif
539     }
540         
541     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
542 }
543     
544 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
545 {
546     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
547     Node* node = info.node();
548     ASSERT(info.registerFormat() == DataFormatDouble);
549
550     SilentSpillAction spillAction;
551     SilentFillAction fillAction;
552         
553     if (!info.needsSpill())
554         spillAction = DoNothingForSpill;
555     else {
556         ASSERT(!node->hasConstant());
557         ASSERT(info.spillFormat() == DataFormatNone);
558         ASSERT(info.fpr() == source);
559         spillAction = StoreDouble;
560     }
561         
562 #if USE(JSVALUE64)
563     if (node->hasConstant()) {
564         node->asNumber(); // To get the assertion.
565         fillAction = SetDoubleConstant;
566     } else {
567         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
568         fillAction = LoadDouble;
569     }
570 #elif USE(JSVALUE32_64)
571     ASSERT(info.registerFormat() == DataFormatDouble);
572     if (node->hasConstant()) {
573         node->asNumber(); // To get the assertion.
574         fillAction = SetDoubleConstant;
575     } else
576         fillAction = LoadDouble;
577 #endif
578
579     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
580 }
581     
582 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
583 {
584     switch (plan.spillAction()) {
585     case DoNothingForSpill:
586         break;
587     case Store32Tag:
588         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
589         break;
590     case Store32Payload:
591         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
592         break;
593     case StorePtr:
594         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
595         break;
596 #if USE(JSVALUE64)
597     case Store64:
598         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
599         break;
600 #endif
601     case StoreDouble:
602         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
603         break;
604     default:
605         RELEASE_ASSERT_NOT_REACHED();
606     }
607 }
608     
609 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
610 {
611 #if USE(JSVALUE32_64)
612     UNUSED_PARAM(canTrample);
613 #endif
614     switch (plan.fillAction()) {
615     case DoNothingForFill:
616         break;
617     case SetInt32Constant:
618         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
619         break;
620 #if USE(JSVALUE64)
621     case SetInt52Constant:
622         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
623         break;
624     case SetStrictInt52Constant:
625         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
626         break;
627 #endif // USE(JSVALUE64)
628     case SetBooleanConstant:
629         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
630         break;
631     case SetCellConstant:
632         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
633         break;
634 #if USE(JSVALUE64)
635     case SetTrustedJSConstant:
636         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
637         break;
638     case SetJSConstant:
639         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
640         break;
641     case SetDoubleConstant:
642         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
643         m_jit.move64ToDouble(canTrample, plan.fpr());
644         break;
645     case Load32PayloadBoxInt:
646         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
647         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
648         break;
649     case Load32PayloadConvertToInt52:
650         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
651         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
652         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
653         break;
654     case Load32PayloadSignExtend:
655         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
656         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
657         break;
658 #else
659     case SetJSConstantTag:
660         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
661         break;
662     case SetJSConstantPayload:
663         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
664         break;
665     case SetInt32Tag:
666         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
667         break;
668     case SetCellTag:
669         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
670         break;
671     case SetBooleanTag:
672         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
673         break;
674     case SetDoubleConstant:
675         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
676         break;
677 #endif
678     case Load32Tag:
679         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
680         break;
681     case Load32Payload:
682         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
683         break;
684     case LoadPtr:
685         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
686         break;
687 #if USE(JSVALUE64)
688     case Load64:
689         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
690         break;
691     case Load64ShiftInt52Right:
692         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
693         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
694         break;
695     case Load64ShiftInt52Left:
696         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
697         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
698         break;
699 #endif
700     case LoadDouble:
701         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
702         break;
703     default:
704         RELEASE_ASSERT_NOT_REACHED();
705     }
706 }
707     
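// Emits a branch that is taken when the indexing-type byte in tempGPR does not match the
// expected shape (and, depending on the array class, the expected state of the IsArray bit).
// Note that tempGPR is clobbered by the masking.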
708 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
709 {
710     switch (arrayMode.arrayClass()) {
711     case Array::OriginalArray: {
712         CRASH();
713 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
714         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
715         return result;
716 #endif
717     }
718         
719     case Array::Array:
720         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
721         return m_jit.branch32(
722             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
723         
724     case Array::NonArray:
725     case Array::OriginalNonArray:
726         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
727         return m_jit.branch32(
728             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
729         
730     case Array::PossiblyArray:
731         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
732         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
733     }
734     
735     RELEASE_ASSERT_NOT_REACHED();
736     return JITCompiler::Jump();
737 }
738
739 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
740 {
741     JITCompiler::JumpList result;
742     
743     switch (arrayMode.type()) {
744     case Array::Int32:
745         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
746
747     case Array::Double:
748         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
749
750     case Array::Contiguous:
751         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
752
753     case Array::Undecided:
754         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
755
756     case Array::ArrayStorage:
757     case Array::SlowPutArrayStorage: {
758         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
759         
760         if (arrayMode.isJSArray()) {
761             if (arrayMode.isSlowPut()) {
762                 result.append(
763                     m_jit.branchTest32(
764                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
765                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
766                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
767                 result.append(
768                     m_jit.branch32(
769                         MacroAssembler::Above, tempGPR,
770                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
771                 break;
772             }
773             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
774             result.append(
775                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
776             break;
777         }
778         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
779         if (arrayMode.isSlowPut()) {
780             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
781             result.append(
782                 m_jit.branch32(
783                     MacroAssembler::Above, tempGPR,
784                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
785             break;
786         }
787         result.append(
788             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
789         break;
790     }
791     default:
792         CRASH();
793         break;
794     }
795     
796     return result;
797 }
798
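// CheckArray: if the abstract interpreter has not already proven the speculated ArrayMode,
// verify it at run time, either from the indexing-type byte or, for arguments objects and
// typed arrays, from the cell's JSType, and OSR-exit on mismatch. No conversion happens
// here; that is Arrayify's job.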
799 void SpeculativeJIT::checkArray(Node* node)
800 {
801     ASSERT(node->arrayMode().isSpecific());
802     ASSERT(!node->arrayMode().doesConversion());
803     
804     SpeculateCellOperand base(this, node->child1());
805     GPRReg baseReg = base.gpr();
806     
807     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
808         noResult(m_currentNode);
809         return;
810     }
811     
812     const ClassInfo* expectedClassInfo = 0;
813     
814     switch (node->arrayMode().type()) {
815     case Array::AnyTypedArray:
816     case Array::String:
817         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
818         break;
819     case Array::Int32:
820     case Array::Double:
821     case Array::Contiguous:
822     case Array::Undecided:
823     case Array::ArrayStorage:
824     case Array::SlowPutArrayStorage: {
825         GPRTemporary temp(this);
826         GPRReg tempGPR = temp.gpr();
827         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
828         speculationCheck(
829             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
830             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
831         
832         noResult(m_currentNode);
833         return;
834     }
835     case Array::DirectArguments:
836         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
837         noResult(m_currentNode);
838         return;
839     case Array::ScopedArguments:
840         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
841         noResult(m_currentNode);
842         return;
843     default:
844         speculateCellTypeWithoutTypeFiltering(
845             node->child1(), baseReg,
846             typeForTypedArrayType(node->arrayMode().typedArrayType()));
847         noResult(m_currentNode);
848         return;
849     }
850     
851     RELEASE_ASSERT(expectedClassInfo);
852     
853     GPRTemporary temp(this);
854     GPRTemporary temp2(this);
855     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
856     speculationCheck(
857         BadType, JSValueSource::unboxedCell(baseReg), node,
858         m_jit.branchPtr(
859             MacroAssembler::NotEqual,
860             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
861             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
862     
863     noResult(m_currentNode);
864 }
865
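// Arrayify/ArrayifyToStructure: if the base object does not already have the wanted indexing
// shape (or exact structure), fall into a slow path that converts its storage; otherwise fall
// through with no work done.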
866 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
867 {
868     ASSERT(node->arrayMode().doesConversion());
869     
870     GPRTemporary temp(this);
871     GPRTemporary structure;
872     GPRReg tempGPR = temp.gpr();
873     GPRReg structureGPR = InvalidGPRReg;
874     
875     if (node->op() != ArrayifyToStructure) {
876         GPRTemporary realStructure(this);
877         structure.adopt(realStructure);
878         structureGPR = structure.gpr();
879     }
880         
881     // We can skip all that comes next if we already have array storage.
882     MacroAssembler::JumpList slowPath;
883     
884     if (node->op() == ArrayifyToStructure) {
885         slowPath.append(m_jit.branchWeakStructure(
886             JITCompiler::NotEqual,
887             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
888             node->structure()));
889     } else {
890         m_jit.load8(
891             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
892         
893         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
894     }
895     
896     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
897         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
898     
899     noResult(m_currentNode);
900 }
901
902 void SpeculativeJIT::arrayify(Node* node)
903 {
904     ASSERT(node->arrayMode().isSpecific());
905     
906     SpeculateCellOperand base(this, node->child1());
907     
908     if (!node->child2()) {
909         arrayify(node, base.gpr(), InvalidGPRReg);
910         return;
911     }
912     
913     SpeculateInt32Operand property(this, node->child2());
914     
915     arrayify(node, base.gpr(), property.gpr());
916 }
917
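// Fills an edge's storage value into a GPR: reload it if it was spilled as DataFormatStorage,
// lock the register it already occupies, or otherwise fill the edge as a cell.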
918 GPRReg SpeculativeJIT::fillStorage(Edge edge)
919 {
920     VirtualRegister virtualRegister = edge->virtualRegister();
921     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
922     
923     switch (info.registerFormat()) {
924     case DataFormatNone: {
925         if (info.spillFormat() == DataFormatStorage) {
926             GPRReg gpr = allocate();
927             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
928             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
929             info.fillStorage(*m_stream, gpr);
930             return gpr;
931         }
932         
933         // Must be a cell; fill it as a cell and then return the pointer.
934         return fillSpeculateCell(edge);
935     }
936         
937     case DataFormatStorage: {
938         GPRReg gpr = info.gpr();
939         m_gprs.lock(gpr);
940         return gpr;
941     }
942         
943     default:
944         return fillSpeculateCell(edge);
945     }
946 }
947
948 void SpeculativeJIT::useChildren(Node* node)
949 {
950     if (node->flags() & NodeHasVarArgs) {
951         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
952             if (!!m_jit.graph().m_varArgChildren[childIdx])
953                 use(m_jit.graph().m_varArgChildren[childIdx]);
954         }
955     } else {
956         Edge child1 = node->child1();
957         if (!child1) {
958             ASSERT(!node->child2() && !node->child3());
959             return;
960         }
961         use(child1);
962         
963         Edge child2 = node->child2();
964         if (!child2) {
965             ASSERT(!node->child3());
966             return;
967         }
968         use(child2);
969         
970         Edge child3 = node->child3();
971         if (!child3)
972             return;
973         use(child3);
974     }
975 }
976
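// TryGetById compiles to a cached get-by-id using AccessType::GetPure. A cell-speculated base
// keeps registers allocated across the IC (NeedToSpill); an untyped base routes the not-cell
// case straight to the IC's slow path (DontSpill).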
977 void SpeculativeJIT::compileTryGetById(Node* node)
978 {
979     switch (node->child1().useKind()) {
980     case CellUse: {
981         SpeculateCellOperand base(this, node->child1());
982         JSValueRegsTemporary result(this, Reuse, base);
983
984         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
985         JSValueRegs resultRegs = result.regs();
986
987         base.use();
988
989         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
990
991         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
992         break;
993     }
994
995     case UntypedUse: {
996         JSValueOperand base(this, node->child1());
997         JSValueRegsTemporary result(this, Reuse, base);
998
999         JSValueRegs baseRegs = base.jsValueRegs();
1000         JSValueRegs resultRegs = result.regs();
1001
1002         base.use();
1003
1004         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1005
1006         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
1007
1008         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1009         break;
1010     }
1011
1012     default:
1013         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1014         break;
1015     } 
1016 }
1017
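// 'in' with a constant JSString key backed by an atomic string gets a patchable jump plus a
// StructureStubInfo so the access can be repatched (slow path: operationInOptimize). Any other
// key falls back to the generic operationGenericIn call.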
1018 void SpeculativeJIT::compileIn(Node* node)
1019 {
1020     SpeculateCellOperand base(this, node->child2());
1021     GPRReg baseGPR = base.gpr();
1022     
1023     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1024         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1025             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1026             
1027             GPRTemporary result(this);
1028             GPRReg resultGPR = result.gpr();
1029
1030             use(node->child1());
1031             
1032             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1033             MacroAssembler::Label done = m_jit.label();
1034             
1035             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1036             // we can cast it to const AtomicStringImpl* safely.
1037             auto slowPath = slowPathCall(
1038                 jump.m_jump, this, operationInOptimize,
1039                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1040                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1041             
1042             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1043             stubInfo->codeOrigin = node->origin.semantic;
1044             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1045             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1046 #if USE(JSVALUE32_64)
1047             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1048             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1049 #endif
1050             stubInfo->patch.usedRegisters = usedRegisters();
1051
1052             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1053             addSlowPathGenerator(WTFMove(slowPath));
1054
1055             base.use();
1056
1057             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1058             return;
1059         }
1060     }
1061
1062     JSValueOperand key(this, node->child1());
1063     JSValueRegs regs = key.jsValueRegs();
1064         
1065     GPRFlushedCallResult result(this);
1066     GPRReg resultGPR = result.gpr();
1067         
1068     base.use();
1069     key.use();
1070         
1071     flushRegisters();
1072     callOperation(
1073         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1074         baseGPR, regs);
1075     m_jit.exceptionCheck();
1076     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1077 }
1078
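// The delete operators have no inline fast path here: operands are used, registers are flushed,
// and the result comes back from operationDeleteById / operationDeleteByVal, followed by an
// exception check.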
1079 void SpeculativeJIT::compileDeleteById(Node* node)
1080 {
1081     JSValueOperand value(this, node->child1());
1082     GPRFlushedCallResult result(this);
1083
1084     JSValueRegs valueRegs = value.jsValueRegs();
1085     GPRReg resultGPR = result.gpr();
1086
1087     value.use();
1088
1089     flushRegisters();
1090     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1091     m_jit.exceptionCheck();
1092
1093     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1094 }
1095
1096 void SpeculativeJIT::compileDeleteByVal(Node* node)
1097 {
1098     JSValueOperand base(this, node->child1());
1099     JSValueOperand key(this, node->child2());
1100     GPRFlushedCallResult result(this);
1101
1102     JSValueRegs baseRegs = base.jsValueRegs();
1103     JSValueRegs keyRegs = key.jsValueRegs();
1104     GPRReg resultGPR = result.gpr();
1105
1106     base.use();
1107     key.use();
1108
1109     flushRegisters();
1110     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1111     m_jit.exceptionCheck();
1112
1113     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1114 }
1115
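// If this compare's only use is the immediately following Branch, fuse the two: emit a peephole
// compare-and-branch, advance m_indexInBlock/m_currentNode past the branch, and return true.
// Otherwise emit a standalone compare that produces a boolean and return false.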
1116 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1117 {
1118     unsigned branchIndexInBlock = detectPeepHoleBranch();
1119     if (branchIndexInBlock != UINT_MAX) {
1120         Node* branchNode = m_block->at(branchIndexInBlock);
1121
1122         ASSERT(node->adjustedRefCount() == 1);
1123         
1124         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1125     
1126         m_indexInBlock = branchIndexInBlock;
1127         m_currentNode = branchNode;
1128         
1129         return true;
1130     }
1131     
1132     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1133     
1134     return false;
1135 }
1136
1137 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1138 {
1139     unsigned branchIndexInBlock = detectPeepHoleBranch();
1140     if (branchIndexInBlock != UINT_MAX) {
1141         Node* branchNode = m_block->at(branchIndexInBlock);
1142
1143         ASSERT(node->adjustedRefCount() == 1);
1144         
1145         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1146     
1147         m_indexInBlock = branchIndexInBlock;
1148         m_currentNode = branchNode;
1149         
1150         return true;
1151     }
1152     
1153     nonSpeculativeNonPeepholeStrictEq(node, invert);
1154     
1155     return false;
1156 }
1157
1158 static const char* dataFormatString(DataFormat format)
1159 {
1160     // These values correspond to the DataFormat enum.
1161     const char* strings[] = {
1162         "[  ]",
1163         "[ i]",
1164         "[ d]",
1165         "[ c]",
1166         "Err!",
1167         "Err!",
1168         "Err!",
1169         "Err!",
1170         "[J ]",
1171         "[Ji]",
1172         "[Jd]",
1173         "[Jc]",
1174         "Err!",
1175         "Err!",
1176         "Err!",
1177         "Err!",
1178     };
1179     return strings[format];
1180 }
1181
1182 void SpeculativeJIT::dump(const char* label)
1183 {
1184     if (label)
1185         dataLogF("<%s>\n", label);
1186
1187     dataLogF("  gprs:\n");
1188     m_gprs.dump();
1189     dataLogF("  fprs:\n");
1190     m_fprs.dump();
1191     dataLogF("  VirtualRegisters:\n");
1192     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1193         GenerationInfo& info = m_generationInfo[i];
1194         if (info.alive())
1195             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1196         else
1197             dataLogF("    % 3d:[__][__]", i);
1198         if (info.registerFormat() == DataFormatDouble)
1199             dataLogF(":fpr%d\n", info.fpr());
1200         else if (info.registerFormat() != DataFormatNone
1201 #if USE(JSVALUE32_64)
1202             && !(info.registerFormat() & DataFormatJS)
1203 #endif
1204             ) {
1205             ASSERT(info.gpr() != InvalidGPRReg);
1206             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1207         } else
1208             dataLogF("\n");
1209     }
1210     if (label)
1211         dataLogF("</%s>\n", label);
1212 }
1213
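// Register temporaries: each one either reuses an operand's register when that operand's node
// has no remaining uses, or allocates a fresh register from the register bank.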
1214 GPRTemporary::GPRTemporary()
1215     : m_jit(0)
1216     , m_gpr(InvalidGPRReg)
1217 {
1218 }
1219
1220 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1221     : m_jit(jit)
1222     , m_gpr(InvalidGPRReg)
1223 {
1224     m_gpr = m_jit->allocate();
1225 }
1226
1227 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1228     : m_jit(jit)
1229     , m_gpr(InvalidGPRReg)
1230 {
1231     m_gpr = m_jit->allocate(specific);
1232 }
1233
1234 #if USE(JSVALUE32_64)
1235 GPRTemporary::GPRTemporary(
1236     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1237     : m_jit(jit)
1238     , m_gpr(InvalidGPRReg)
1239 {
1240     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1241         m_gpr = m_jit->reuse(op1.gpr(which));
1242     else
1243         m_gpr = m_jit->allocate();
1244 }
1245 #endif // USE(JSVALUE32_64)
1246
1247 JSValueRegsTemporary::JSValueRegsTemporary() { }
1248
1249 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1250 #if USE(JSVALUE64)
1251     : m_gpr(jit)
1252 #else
1253     : m_payloadGPR(jit)
1254     , m_tagGPR(jit)
1255 #endif
1256 {
1257 }
1258
1259 #if USE(JSVALUE64)
1260 template<typename T>
1261 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1262     : m_gpr(jit, Reuse, operand)
1263 {
1264 }
1265 #else
1266 template<typename T>
1267 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1268 {
1269     if (resultWord == PayloadWord) {
1270         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1271         m_tagGPR = GPRTemporary(jit);
1272     } else {
1273         m_payloadGPR = GPRTemporary(jit);
1274         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1275     }
1276 }
1277 #endif
1278
1279 #if USE(JSVALUE64)
1280 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1281 {
1282     m_gpr = GPRTemporary(jit, Reuse, operand);
1283 }
1284 #else
1285 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1286 {
1287     if (jit->canReuse(operand.node())) {
1288         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1289         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1290     } else {
1291         m_payloadGPR = GPRTemporary(jit);
1292         m_tagGPR = GPRTemporary(jit);
1293     }
1294 }
1295 #endif
1296
1297 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1298
1299 JSValueRegs JSValueRegsTemporary::regs()
1300 {
1301 #if USE(JSVALUE64)
1302     return JSValueRegs(m_gpr.gpr());
1303 #else
1304     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1305 #endif
1306 }
1307
1308 void GPRTemporary::adopt(GPRTemporary& other)
1309 {
1310     ASSERT(!m_jit);
1311     ASSERT(m_gpr == InvalidGPRReg);
1312     ASSERT(other.m_jit);
1313     ASSERT(other.m_gpr != InvalidGPRReg);
1314     m_jit = other.m_jit;
1315     m_gpr = other.m_gpr;
1316     other.m_jit = 0;
1317     other.m_gpr = InvalidGPRReg;
1318 }
1319
1320 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1321     : m_jit(jit)
1322     , m_fpr(InvalidFPRReg)
1323 {
1324     m_fpr = m_jit->fprAllocate();
1325 }
1326
1327 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1328     : m_jit(jit)
1329     , m_fpr(InvalidFPRReg)
1330 {
1331     if (m_jit->canReuse(op1.node()))
1332         m_fpr = m_jit->reuse(op1.fpr());
1333     else
1334         m_fpr = m_jit->fprAllocate();
1335 }
1336
1337 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1338     : m_jit(jit)
1339     , m_fpr(InvalidFPRReg)
1340 {
1341     if (m_jit->canReuse(op1.node()))
1342         m_fpr = m_jit->reuse(op1.fpr());
1343     else if (m_jit->canReuse(op2.node()))
1344         m_fpr = m_jit->reuse(op2.fpr());
1345     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1346         m_fpr = m_jit->reuse(op1.fpr());
1347     else
1348         m_fpr = m_jit->fprAllocate();
1349 }
1350
1351 #if USE(JSVALUE32_64)
1352 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1353     : m_jit(jit)
1354     , m_fpr(InvalidFPRReg)
1355 {
1356     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1357         m_fpr = m_jit->reuse(op1.fpr());
1358     else
1359         m_fpr = m_jit->fprAllocate();
1360 }
1361 #endif
1362
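// Peephole compare-and-branch codegen. Each helper branches to the taken block and falls
// through to notTaken; when taken is the immediate fall-through successor, the condition is
// inverted and the targets swapped so that the common case falls through.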
1363 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1364 {
1365     BasicBlock* taken = branchNode->branchData()->taken.block;
1366     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1367
1368     if (taken == nextBlock()) {
1369         condition = MacroAssembler::invert(condition);
1370         std::swap(taken, notTaken);
1371     }
1372
1373     SpeculateDoubleOperand op1(this, node->child1());
1374     SpeculateDoubleOperand op2(this, node->child2());
1375     
1376     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1377     jump(notTaken);
1378 }
1379
1380 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1381 {
1382     BasicBlock* taken = branchNode->branchData()->taken.block;
1383     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1384
1385     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1386     
1387     if (taken == nextBlock()) {
1388         condition = MacroAssembler::NotEqual;
1389         BasicBlock* tmp = taken;
1390         taken = notTaken;
1391         notTaken = tmp;
1392     }
1393
1394     SpeculateCellOperand op1(this, node->child1());
1395     SpeculateCellOperand op2(this, node->child2());
1396     
1397     GPRReg op1GPR = op1.gpr();
1398     GPRReg op2GPR = op2.gpr();
1399     
1400     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1401         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1402             speculationCheck(
1403                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1404         }
1405         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1406             speculationCheck(
1407                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1408         }
1409     } else {
1410         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1411             speculationCheck(
1412                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1413                 m_jit.branchIfNotObject(op1GPR));
1414         }
1415         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1416             m_jit.branchTest8(
1417                 MacroAssembler::NonZero, 
1418                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1419                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1420
1421         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1422             speculationCheck(
1423                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1424                 m_jit.branchIfNotObject(op2GPR));
1425         }
1426         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1427             m_jit.branchTest8(
1428                 MacroAssembler::NonZero, 
1429                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1430                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1431     }
1432
1433     branchPtr(condition, op1GPR, op2GPR, taken);
1434     jump(notTaken);
1435 }
1436
1437 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1438 {
1439     BasicBlock* taken = branchNode->branchData()->taken.block;
1440     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1441
1442     // The branch instruction will branch to the taken block.
1443     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1444     if (taken == nextBlock()) {
1445         condition = JITCompiler::invert(condition);
1446         BasicBlock* tmp = taken;
1447         taken = notTaken;
1448         notTaken = tmp;
1449     }
1450
1451     if (node->child1()->isInt32Constant()) {
1452         int32_t imm = node->child1()->asInt32();
1453         SpeculateBooleanOperand op2(this, node->child2());
1454         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1455     } else if (node->child2()->isInt32Constant()) {
1456         SpeculateBooleanOperand op1(this, node->child1());
1457         int32_t imm = node->child2()->asInt32();
1458         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1459     } else {
1460         SpeculateBooleanOperand op1(this, node->child1());
1461         SpeculateBooleanOperand op2(this, node->child2());
1462         branch32(condition, op1.gpr(), op2.gpr(), taken);
1463     }
1464
1465     jump(notTaken);
1466 }
1467
1468 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1469 {
1470     BasicBlock* taken = branchNode->branchData()->taken.block;
1471     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1472
1473     // The branch instruction will branch to the taken block.
1474     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1475     if (taken == nextBlock()) {
1476         condition = JITCompiler::invert(condition);
1477         BasicBlock* tmp = taken;
1478         taken = notTaken;
1479         notTaken = tmp;
1480     }
1481
1482     if (node->child1()->isInt32Constant()) {
1483         int32_t imm = node->child1()->asInt32();
1484         SpeculateInt32Operand op2(this, node->child2());
1485         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1486     } else if (node->child2()->isInt32Constant()) {
1487         SpeculateInt32Operand op1(this, node->child1());
1488         int32_t imm = node->child2()->asInt32();
1489         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1490     } else {
1491         SpeculateInt32Operand op1(this, node->child1());
1492         SpeculateInt32Operand op2(this, node->child2());
1493         branch32(condition, op1.gpr(), op2.gpr(), taken);
1494     }
1495
1496     jump(notTaken);
1497 }
1498
1499 // Returns true if the compare is fused with a subsequent branch.
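     // When fusion succeeds, the Branch node is compiled here together with the compare and the
     // current position in the block is advanced past it, so the main loop never emits the branch
     // separately.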
1500 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1501 {
1502     // Fused compare & branch.
1503     unsigned branchIndexInBlock = detectPeepHoleBranch();
1504     if (branchIndexInBlock != UINT_MAX) {
1505         Node* branchNode = m_block->at(branchIndexInBlock);
1506
1507         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1508         // so there can be no intervening nodes that also reference the compare.
1509         ASSERT(node->adjustedRefCount() == 1);
1510
1511         if (node->isBinaryUseKind(Int32Use))
1512             compilePeepHoleInt32Branch(node, branchNode, condition);
1513 #if USE(JSVALUE64)
1514         else if (node->isBinaryUseKind(Int52RepUse))
1515             compilePeepHoleInt52Branch(node, branchNode, condition);
1516 #endif // USE(JSVALUE64)
1517         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1518             // Use non-peephole comparison, for now.
1519             return false;
1520         } else if (node->isBinaryUseKind(DoubleRepUse))
1521             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1522         else if (node->op() == CompareEq) {
1523             if (node->isBinaryUseKind(BooleanUse))
1524                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1525             else if (node->isBinaryUseKind(SymbolUse))
1526                 compilePeepHoleSymbolEquality(node, branchNode);
1527             else if (node->isBinaryUseKind(ObjectUse))
1528                 compilePeepHoleObjectEquality(node, branchNode);
1529             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1530                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1531             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1532                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1533             else if (!needsTypeCheck(node->child1(), SpecOther))
1534                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1535             else if (!needsTypeCheck(node->child2(), SpecOther))
1536                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1537             else {
1538                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1539                 return true;
1540             }
1541         } else {
1542             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1543             return true;
1544         }
1545
1546         use(node->child1());
1547         use(node->child2());
1548         m_indexInBlock = branchIndexInBlock;
1549         m_currentNode = branchNode;
1550         return true;
1551     }
1552     return false;
1553 }
1554
1555 void SpeculativeJIT::noticeOSRBirth(Node* node)
1556 {
1557     if (!node->hasVirtualRegister())
1558         return;
1559     
1560     VirtualRegister virtualRegister = node->virtualRegister();
1561     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1562     
1563     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1564 }
1565
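     // A MovHint generates no code of its own. It records in the variable event stream that the
     // bytecode local named by unlinkedLocal() now logically holds child1's value, which OSR exit
     // uses to reconstruct bytecode state.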
1566 void SpeculativeJIT::compileMovHint(Node* node)
1567 {
1568     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1569     
1570     Node* child = node->child1().node();
1571     noticeOSRBirth(child);
1572     
1573     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1574 }
1575
1576 void SpeculativeJIT::bail(AbortReason reason)
1577 {
1578     if (verboseCompilationEnabled())
1579         dataLog("Bailing compilation.\n");
1580     m_compileOkay = true;
1581     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1582     clearGenerationInfo();
1583 }
1584
1585 void SpeculativeJIT::compileCurrentBlock()
1586 {
1587     ASSERT(m_compileOkay);
1588     
1589     if (!m_block)
1590         return;
1591     
1592     ASSERT(m_block->isReachable);
1593     
1594     m_jit.blockHeads()[m_block->index] = m_jit.label();
1595
1596     if (!m_block->intersectionOfCFAHasVisited) {
1597         // Don't generate code for basic blocks that are unreachable according to CFA.
1598         // But to be sure that nobody has generated a jump to this block, drop in a
1599         // breakpoint here.
1600         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1601         return;
1602     }
1603
1604     m_stream->appendAndLog(VariableEvent::reset());
1605     
1606     m_jit.jitAssertHasValidCallFrame();
1607     m_jit.jitAssertTagsInPlace();
1608     m_jit.jitAssertArgumentCountSane();
1609
1610     m_state.reset();
1611     m_state.beginBasicBlock(m_block);
1612     
1613     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1614         int operand = m_block->variablesAtHead.operandForIndex(i);
1615         Node* node = m_block->variablesAtHead[i];
1616         if (!node)
1617             continue; // No need to record dead SetLocals.
1618         
1619         VariableAccessData* variable = node->variableAccessData();
1620         DataFormat format;
1621         if (!node->refCount())
1622             continue; // No need to record dead SetLocals.
1623         format = dataFormatFor(variable->flushFormat());
1624         m_stream->appendAndLog(
1625             VariableEvent::setLocal(
1626                 VirtualRegister(operand),
1627                 variable->machineLocal(),
1628                 format));
1629     }
1630
1631     m_origin = NodeOrigin();
1632     
1633     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1634         m_currentNode = m_block->at(m_indexInBlock);
1635         
1636         // We may have hit a contradiction that the CFA was aware of but that the JIT
1637         // didn't cause directly.
1638         if (!m_state.isValid()) {
1639             bail(DFGBailedAtTopOfBlock);
1640             return;
1641         }
1642
1643         m_interpreter.startExecuting();
1644         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1645         m_jit.setForNode(m_currentNode);
1646         m_origin = m_currentNode->origin;
1647         if (validationEnabled())
1648             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1649         m_lastGeneratedNode = m_currentNode->op();
1650         
1651         ASSERT(m_currentNode->shouldGenerate());
1652         
1653         if (verboseCompilationEnabled()) {
1654             dataLogF(
1655                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1656                 (int)m_currentNode->index(),
1657                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1658             dataLog("\n");
1659         }
1660
1661         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1662             m_jit.jitReleaseAssertNoException();
1663
1664         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1665
1666         compile(m_currentNode);
1667         
1668         if (belongsInMinifiedGraph(m_currentNode->op()))
1669             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1670         
1671 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1672         m_jit.clearRegisterAllocationOffsets();
1673 #endif
1674         
1675         if (!m_compileOkay) {
1676             bail(DFGBailedAtEndOfNode);
1677             return;
1678         }
1679         
1680         // Make sure that the abstract state is rematerialized for the next node.
1681         m_interpreter.executeEffects(m_indexInBlock);
1682     }
1683     
1684     // Perform the most basic verification that children have been used correctly.
1685     if (!ASSERT_DISABLED) {
1686         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1687             GenerationInfo& info = m_generationInfo[index];
1688             RELEASE_ASSERT(!info.alive());
1689         }
1690     }
1691 }
1692
1693 // If we are making type predictions about our arguments then
1694 // we need to check that they are correct on function entry.
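     // Arguments flushed as FlushedJSValue need no check, since any JSValue is acceptable in that
     // format; the narrower formats are verified below with speculation checks against the incoming
     // values.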
1695 void SpeculativeJIT::checkArgumentTypes()
1696 {
1697     ASSERT(!m_currentNode);
1698     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1699
1700     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1701         Node* node = m_jit.graph().m_arguments[i];
1702         if (!node) {
1703             // The argument is dead. We don't do any checks for such arguments.
1704             continue;
1705         }
1706         
1707         ASSERT(node->op() == SetArgument);
1708         ASSERT(node->shouldGenerate());
1709
1710         VariableAccessData* variableAccessData = node->variableAccessData();
1711         FlushFormat format = variableAccessData->flushFormat();
1712         
1713         if (format == FlushedJSValue)
1714             continue;
1715         
1716         VirtualRegister virtualRegister = variableAccessData->local();
1717
1718         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1719         
1720 #if USE(JSVALUE64)
1721         switch (format) {
1722         case FlushedInt32: {
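                 // Boxed int32s have all TagTypeNumber bits set, so any bit pattern numerically
                 // below the tag register cannot be an int32.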
1723             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1724             break;
1725         }
1726         case FlushedBoolean: {
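                 // ValueTrue is ValueFalse + 1, so xor'ing with ValueFalse leaves at most the low
                 // bit set for a genuine boolean; any other bits indicate a non-boolean.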
1727             GPRTemporary temp(this);
1728             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1729             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1730             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1731             break;
1732         }
1733         case FlushedCell: {
1734             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1735             break;
1736         }
1737         default:
1738             RELEASE_ASSERT_NOT_REACHED();
1739             break;
1740         }
1741 #else
1742         switch (format) {
1743         case FlushedInt32: {
1744             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1745             break;
1746         }
1747         case FlushedBoolean: {
1748             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1749             break;
1750         }
1751         case FlushedCell: {
1752             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1753             break;
1754         }
1755         default:
1756             RELEASE_ASSERT_NOT_REACHED();
1757             break;
1758         }
1759 #endif
1760     }
1761
1762     m_origin = NodeOrigin();
1763 }
1764
1765 bool SpeculativeJIT::compile()
1766 {
1767     checkArgumentTypes();
1768     
1769     ASSERT(!m_currentNode);
1770     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1771         m_jit.setForBlockIndex(blockIndex);
1772         m_block = m_jit.graph().block(blockIndex);
1773         compileCurrentBlock();
1774     }
1775     linkBranches();
1776     return true;
1777 }
1778
1779 void SpeculativeJIT::createOSREntries()
1780 {
1781     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1782         BasicBlock* block = m_jit.graph().block(blockIndex);
1783         if (!block)
1784             continue;
1785         if (!block->isOSRTarget)
1786             continue;
1787         
1788         // Currently we don't have OSR entry trampolines. We could add them
1789         // here if need be.
1790         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1791     }
1792 }
1793
1794 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1795 {
1796     unsigned osrEntryIndex = 0;
1797     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1798         BasicBlock* block = m_jit.graph().block(blockIndex);
1799         if (!block)
1800             continue;
1801         if (!block->isOSRTarget)
1802             continue;
1803         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1804     }
1805     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1806     
1807     if (verboseCompilationEnabled()) {
1808         DumpContext dumpContext;
1809         dataLog("OSR Entries:\n");
1810         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1811             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1812         if (!dumpContext.isEmpty())
1813             dumpContext.dump(WTF::dataFile());
1814     }
1815 }
1816
1817 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1818 {
1819     Edge child3 = m_jit.graph().varArgChild(node, 2);
1820     Edge child4 = m_jit.graph().varArgChild(node, 3);
1821
1822     ArrayMode arrayMode = node->arrayMode();
1823     
1824     GPRReg baseReg = base.gpr();
1825     GPRReg propertyReg = property.gpr();
1826     
1827     SpeculateDoubleOperand value(this, child3);
1828
1829     FPRReg valueReg = value.fpr();
1830     
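         // The value must be a real (non-NaN) number to go into double storage. The self-comparison
         // below is taken only for NaN, failing the SpecFullRealNumber speculation.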
1831     DFG_TYPE_CHECK(
1832         JSValueRegs(), child3, SpecFullRealNumber,
1833         m_jit.branchDouble(
1834             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1835     
1836     if (!m_compileOkay)
1837         return;
1838     
1839     StorageOperand storage(this, child4);
1840     GPRReg storageReg = storage.gpr();
1841
1842     if (node->op() == PutByValAlias) {
1843         // Store the value to the array.
1844         GPRReg propertyReg = property.gpr();
1845         FPRReg valueReg = value.fpr();
1846         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1847         
1848         noResult(m_currentNode);
1849         return;
1850     }
1851     
1852     GPRTemporary temporary;
1853     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1854
1855     MacroAssembler::Jump slowCase;
1856     
1857     if (arrayMode.isInBounds()) {
1858         speculationCheck(
1859             OutOfBounds, JSValueRegs(), 0,
1860             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1861     } else {
1862         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1863         
1864         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1865         
1866         if (!arrayMode.isOutOfBounds())
1867             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1868         
1869         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1870         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1871         
1872         inBounds.link(&m_jit);
1873     }
1874     
1875     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1876
1877     base.use();
1878     property.use();
1879     value.use();
1880     storage.use();
1881     
1882     if (arrayMode.isOutOfBounds()) {
1883         addSlowPathGenerator(
1884             slowPathCall(
1885                 slowCase, this,
1886                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1887                 NoResult, baseReg, propertyReg, valueReg));
1888     }
1889
1890     noResult(m_currentNode, UseChildrenCalledExplicitly);
1891 }
1892
1893 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1894 {
1895     SpeculateCellOperand string(this, node->child1());
1896     SpeculateStrictInt32Operand index(this, node->child2());
1897     StorageOperand storage(this, node->child3());
1898
1899     GPRReg stringReg = string.gpr();
1900     GPRReg indexReg = index.gpr();
1901     GPRReg storageReg = storage.gpr();
1902     
1903     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1904
1905     // unsigned comparison so we can filter out negative indices and indices that are too large
1906     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1907
1908     GPRTemporary scratch(this);
1909     GPRReg scratchReg = scratch.gpr();
1910
1911     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1912
1913     // Load the character into scratchReg
1914     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1915
1916     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1917     JITCompiler::Jump cont8Bit = m_jit.jump();
1918
1919     is16Bit.link(&m_jit);
1920
1921     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1922
1923     cont8Bit.link(&m_jit);
1924
1925     int32Result(scratchReg, m_currentNode);
1926 }
1927
1928 void SpeculativeJIT::compileGetByValOnString(Node* node)
1929 {
1930     SpeculateCellOperand base(this, node->child1());
1931     SpeculateStrictInt32Operand property(this, node->child2());
1932     StorageOperand storage(this, node->child3());
1933     GPRReg baseReg = base.gpr();
1934     GPRReg propertyReg = property.gpr();
1935     GPRReg storageReg = storage.gpr();
1936
1937     GPRTemporary scratch(this);
1938     GPRReg scratchReg = scratch.gpr();
1939 #if USE(JSVALUE32_64)
1940     GPRTemporary resultTag;
1941     GPRReg resultTagReg = InvalidGPRReg;
1942     if (node->arrayMode().isOutOfBounds()) {
1943         GPRTemporary realResultTag(this);
1944         resultTag.adopt(realResultTag);
1945         resultTagReg = resultTag.gpr();
1946     }
1947 #endif
1948
1949     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1950
1951     // unsigned comparison so we can filter out negative indices and indices that are too large
1952     JITCompiler::Jump outOfBounds = m_jit.branch32(
1953         MacroAssembler::AboveOrEqual, propertyReg,
1954         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1955     if (node->arrayMode().isInBounds())
1956         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1957
1958     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1959
1960     // Load the character into scratchReg
1961     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1962
1963     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1964     JITCompiler::Jump cont8Bit = m_jit.jump();
1965
1966     is16Bit.link(&m_jit);
1967
1968     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1969
1970     JITCompiler::Jump bigCharacter =
1971         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1972
1973     // 8 bit string values don't need the isASCII check.
1974     cont8Bit.link(&m_jit);
1975
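         // Characters below 0x100 are materialized from the VM's single-character string table; the
         // shift scales the character to pointer size so it can index the table. Larger characters
         // were diverted above to the operationSingleCharacterString slow path.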
1976     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1977     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1978     m_jit.loadPtr(scratchReg, scratchReg);
1979
1980     addSlowPathGenerator(
1981         slowPathCall(
1982             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1983
1984     if (node->arrayMode().isOutOfBounds()) {
1985 #if USE(JSVALUE32_64)
1986         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1987 #endif
1988
1989         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1990         bool prototypeChainIsSane = false;
1991         if (globalObject->stringPrototypeChainIsSane()) {
1992             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1993             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1994             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1995             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1996             // indexed properties either.
1997             // https://bugs.webkit.org/show_bug.cgi?id=144668
1998             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1999             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2000             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2001         }
2002         if (prototypeChainIsSane) {
2003             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2004             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2005             
2006 #if USE(JSVALUE64)
2007             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2008                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2009 #else
2010             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2011                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2012                 baseReg, propertyReg));
2013 #endif
2014         } else {
2015 #if USE(JSVALUE64)
2016             addSlowPathGenerator(
2017                 slowPathCall(
2018                     outOfBounds, this, operationGetByValStringInt,
2019                     scratchReg, baseReg, propertyReg));
2020 #else
2021             addSlowPathGenerator(
2022                 slowPathCall(
2023                     outOfBounds, this, operationGetByValStringInt,
2024                     resultTagReg, scratchReg, baseReg, propertyReg));
2025 #endif
2026         }
2027         
2028 #if USE(JSVALUE64)
2029         jsValueResult(scratchReg, m_currentNode);
2030 #else
2031         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2032 #endif
2033     } else
2034         cellResult(scratchReg, m_currentNode);
2035 }
2036
2037 void SpeculativeJIT::compileFromCharCode(Node* node)
2038 {
2039     Edge& child = node->child1();
2040     if (child.useKind() == UntypedUse) {
2041         JSValueOperand opr(this, child);
2042         JSValueRegs oprRegs = opr.jsValueRegs();
2043 #if USE(JSVALUE64)
2044         GPRTemporary result(this);
2045         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2046 #else
2047         GPRTemporary resultTag(this);
2048         GPRTemporary resultPayload(this);
2049         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2050 #endif
2051         flushRegisters();
2052         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2053         m_jit.exceptionCheck();
2054         
2055         jsValueResult(resultRegs, node);
2056         return;
2057     }
2058
2059     SpeculateStrictInt32Operand property(this, child);
2060     GPRReg propertyReg = property.gpr();
2061     GPRTemporary smallStrings(this);
2062     GPRTemporary scratch(this);
2063     GPRReg scratchReg = scratch.gpr();
2064     GPRReg smallStringsReg = smallStrings.gpr();
2065
2066     JITCompiler::JumpList slowCases;
2067     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2068     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2069     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2070
2071     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2072     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2073     cellResult(scratchReg, m_currentNode);
2074 }
2075
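     // Inspect how the operand is currently represented so that compileValueToInt32 can pick the
     // cheapest path: operands already known to be integers skip the generic JSValue conversion.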
2076 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2077 {
2078     VirtualRegister virtualRegister = node->virtualRegister();
2079     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2080
2081     switch (info.registerFormat()) {
2082     case DataFormatStorage:
2083         RELEASE_ASSERT_NOT_REACHED();
2084
2085     case DataFormatBoolean:
2086     case DataFormatCell:
2087         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2088         return GeneratedOperandTypeUnknown;
2089
2090     case DataFormatNone:
2091     case DataFormatJSCell:
2092     case DataFormatJS:
2093     case DataFormatJSBoolean:
2094     case DataFormatJSDouble:
2095         return GeneratedOperandJSValue;
2096
2097     case DataFormatJSInt32:
2098     case DataFormatInt32:
2099         return GeneratedOperandInteger;
2100
2101     default:
2102         RELEASE_ASSERT_NOT_REACHED();
2103         return GeneratedOperandTypeUnknown;
2104     }
2105 }
2106
2107 void SpeculativeJIT::compileValueToInt32(Node* node)
2108 {
2109     switch (node->child1().useKind()) {
2110 #if USE(JSVALUE64)
2111     case Int52RepUse: {
2112         SpeculateStrictInt52Operand op1(this, node->child1());
2113         GPRTemporary result(this, Reuse, op1);
2114         GPRReg op1GPR = op1.gpr();
2115         GPRReg resultGPR = result.gpr();
2116         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2117         int32Result(resultGPR, node, DataFormatInt32);
2118         return;
2119     }
2120 #endif // USE(JSVALUE64)
2121         
2122     case DoubleRepUse: {
2123         GPRTemporary result(this);
2124         SpeculateDoubleOperand op1(this, node->child1());
2125         FPRReg fpr = op1.fpr();
2126         GPRReg gpr = result.gpr();
2127         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2128         
2129         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2130         
2131         int32Result(gpr, node);
2132         return;
2133     }
2134     
2135     case NumberUse:
2136     case NotCellUse: {
2137         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2138         case GeneratedOperandInteger: {
2139             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2140             GPRTemporary result(this, Reuse, op1);
2141             m_jit.move(op1.gpr(), result.gpr());
2142             int32Result(result.gpr(), node, op1.format());
2143             return;
2144         }
2145         case GeneratedOperandJSValue: {
2146             GPRTemporary result(this);
2147 #if USE(JSVALUE64)
2148             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2149
2150             GPRReg gpr = op1.gpr();
2151             GPRReg resultGpr = result.gpr();
2152             FPRTemporary tempFpr(this);
2153             FPRReg fpr = tempFpr.fpr();
2154
2155             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2156             JITCompiler::JumpList converted;
2157
2158             if (node->child1().useKind() == NumberUse) {
2159                 DFG_TYPE_CHECK(
2160                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2161                     m_jit.branchTest64(
2162                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2163             } else {
2164                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2165                 
2166                 DFG_TYPE_CHECK(
2167                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2168                 
2169                 // It's not a cell: so true turns into 1 and all else turns into 0.
2170                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2171                 converted.append(m_jit.jump());
2172                 
2173                 isNumber.link(&m_jit);
2174             }
2175
2176             // First, if we get here we have a double encoded as a JSValue
2177             unboxDouble(gpr, resultGpr, fpr);
2178
2179             silentSpillAllRegisters(resultGpr);
2180             callOperation(operationToInt32, resultGpr, fpr);
2181             silentFillAllRegisters(resultGpr);
2182
2183             converted.append(m_jit.jump());
2184
2185             isInteger.link(&m_jit);
2186             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2187
2188             converted.link(&m_jit);
2189 #else
2190             Node* childNode = node->child1().node();
2191             VirtualRegister virtualRegister = childNode->virtualRegister();
2192             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2193
2194             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2195
2196             GPRReg payloadGPR = op1.payloadGPR();
2197             GPRReg resultGpr = result.gpr();
2198         
2199             JITCompiler::JumpList converted;
2200
2201             if (info.registerFormat() == DataFormatJSInt32)
2202                 m_jit.move(payloadGPR, resultGpr);
2203             else {
2204                 GPRReg tagGPR = op1.tagGPR();
2205                 FPRTemporary tempFpr(this);
2206                 FPRReg fpr = tempFpr.fpr();
2207                 FPRTemporary scratch(this);
2208
2209                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2210
2211                 if (node->child1().useKind() == NumberUse) {
2212                     DFG_TYPE_CHECK(
2213                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2214                         m_jit.branch32(
2215                             MacroAssembler::AboveOrEqual, tagGPR,
2216                             TrustedImm32(JSValue::LowestTag)));
2217                 } else {
2218                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2219                     
2220                     DFG_TYPE_CHECK(
2221                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2222                         m_jit.branchIfCell(op1.jsValueRegs()));
2223                     
2224                     // It's not a cell: so true turns into 1 and all else turns into 0.
2225                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2226                     m_jit.move(TrustedImm32(0), resultGpr);
2227                     converted.append(m_jit.jump());
2228                     
2229                     isBoolean.link(&m_jit);
2230                     m_jit.move(payloadGPR, resultGpr);
2231                     converted.append(m_jit.jump());
2232                     
2233                     isNumber.link(&m_jit);
2234                 }
2235
2236                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2237
2238                 silentSpillAllRegisters(resultGpr);
2239                 callOperation(operationToInt32, resultGpr, fpr);
2240                 silentFillAllRegisters(resultGpr);
2241
2242                 converted.append(m_jit.jump());
2243
2244                 isInteger.link(&m_jit);
2245                 m_jit.move(payloadGPR, resultGpr);
2246
2247                 converted.link(&m_jit);
2248             }
2249 #endif
2250             int32Result(resultGpr, node);
2251             return;
2252         }
2253         case GeneratedOperandTypeUnknown:
2254             RELEASE_ASSERT(!m_compileOkay);
2255             return;
2256         }
2257         RELEASE_ASSERT_NOT_REACHED();
2258         return;
2259     }
2260     
2261     default:
2262         ASSERT(!m_compileOkay);
2263         return;
2264     }
2265 }
2266
2267 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2268 {
2269     if (doesOverflow(node->arithMode())) {
2270         if (enableInt52()) {
2271             SpeculateInt32Operand op1(this, node->child1());
2272             GPRTemporary result(this, Reuse, op1);
2273             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2274             strictInt52Result(result.gpr(), node);
2275             return;
2276         }
2277         SpeculateInt32Operand op1(this, node->child1());
2278         FPRTemporary result(this);
2279             
2280         GPRReg inputGPR = op1.gpr();
2281         FPRReg outputFPR = result.fpr();
2282             
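             // Convert as a signed int32 first, then add 2^32 if the sign bit was set, yielding the
             // unsigned value as a double.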
2283         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2284             
2285         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2286         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2287         positive.link(&m_jit);
2288             
2289         doubleResult(outputFPR, node);
2290         return;
2291     }
2292     
2293     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2294
2295     SpeculateInt32Operand op1(this, node->child1());
2296     GPRTemporary result(this);
2297
2298     m_jit.move(op1.gpr(), result.gpr());
2299
2300     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2301
2302     int32Result(result.gpr(), node, op1.format());
2303 }
2304
2305 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2306 {
2307     SpeculateDoubleOperand op1(this, node->child1());
2308     FPRTemporary scratch(this);
2309     GPRTemporary result(this);
2310     
2311     FPRReg valueFPR = op1.fpr();
2312     FPRReg scratchFPR = scratch.fpr();
2313     GPRReg resultGPR = result.gpr();
2314
2315     JITCompiler::JumpList failureCases;
2316     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2317     m_jit.branchConvertDoubleToInt32(
2318         valueFPR, resultGPR, failureCases, scratchFPR,
2319         shouldCheckNegativeZero(node->arithMode()));
2320     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2321
2322     int32Result(resultGPR, node);
2323 }
2324
2325 void SpeculativeJIT::compileDoubleRep(Node* node)
2326 {
2327     switch (node->child1().useKind()) {
2328     case RealNumberUse: {
2329         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2330         FPRTemporary result(this);
2331         
2332         JSValueRegs op1Regs = op1.jsValueRegs();
2333         FPRReg resultFPR = result.fpr();
2334         
2335 #if USE(JSVALUE64)
2336         GPRTemporary temp(this);
2337         GPRReg tempGPR = temp.gpr();
2338         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2339 #else
2340         FPRTemporary temp(this);
2341         FPRReg tempFPR = temp.fpr();
2342         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2343 #endif
2344         
2345         JITCompiler::Jump done = m_jit.branchDouble(
2346             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2347         
2348         DFG_TYPE_CHECK(
2349             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2350         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2351         
2352         done.link(&m_jit);
2353         
2354         doubleResult(resultFPR, node);
2355         return;
2356     }
2357     
2358     case NotCellUse:
2359     case NumberUse: {
2360         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2361
2362         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2363         if (isInt32Speculation(possibleTypes)) {
2364             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2365             FPRTemporary result(this);
2366             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2367             doubleResult(result.fpr(), node);
2368             return;
2369         }
2370
2371         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2372         FPRTemporary result(this);
2373
2374 #if USE(JSVALUE64)
2375         GPRTemporary temp(this);
2376
2377         GPRReg op1GPR = op1.gpr();
2378         GPRReg tempGPR = temp.gpr();
2379         FPRReg resultFPR = result.fpr();
2380         JITCompiler::JumpList done;
2381
2382         JITCompiler::Jump isInteger = m_jit.branch64(
2383             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2384
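             // For NotCellUse, the non-number primitives are converted inline: null and false become 0,
             // true becomes 1, and undefined becomes NaN, matching their ToNumber results.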
2385         if (node->child1().useKind() == NotCellUse) {
2386             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2387             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2388
2389             static const double zero = 0;
2390             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2391
2392             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2393             done.append(isNull);
2394
2395             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2396                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2397
2398             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2399             static const double one = 1;
2400             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2401             done.append(m_jit.jump());
2402             done.append(isFalse);
2403
2404             isUndefined.link(&m_jit);
2405             static const double NaN = PNaN;
2406             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2407             done.append(m_jit.jump());
2408
2409             isNumber.link(&m_jit);
2410         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2411             typeCheck(
2412                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2413                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2414         }
2415
2416         unboxDouble(op1GPR, tempGPR, resultFPR);
2417         done.append(m_jit.jump());
2418     
2419         isInteger.link(&m_jit);
2420         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2421         done.link(&m_jit);
2422 #else // USE(JSVALUE64) -> this is the 32_64 case
2423         FPRTemporary temp(this);
2424     
2425         GPRReg op1TagGPR = op1.tagGPR();
2426         GPRReg op1PayloadGPR = op1.payloadGPR();
2427         FPRReg tempFPR = temp.fpr();
2428         FPRReg resultFPR = result.fpr();
2429         JITCompiler::JumpList done;
2430     
2431         JITCompiler::Jump isInteger = m_jit.branch32(
2432             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2433
2434         if (node->child1().useKind() == NotCellUse) {
2435             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2436             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2437
2438             static const double zero = 0;
2439             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2440
2441             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2442             done.append(isNull);
2443
2444             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2445
2446             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2447             static const double one = 1;
2448             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2449             done.append(m_jit.jump());
2450             done.append(isFalse);
2451
2452             isUndefined.link(&m_jit);
2453             static const double NaN = PNaN;
2454             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2455             done.append(m_jit.jump());
2456
2457             isNumber.link(&m_jit);
2458         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2459             typeCheck(
2460                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2461                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2462         }
2463
2464         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2465         done.append(m_jit.jump());
2466     
2467         isInteger.link(&m_jit);
2468         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2469         done.link(&m_jit);
2470 #endif // USE(JSVALUE64)
2471     
2472         doubleResult(resultFPR, node);
2473         return;
2474     }
2475         
2476 #if USE(JSVALUE64)
2477     case Int52RepUse: {
2478         SpeculateStrictInt52Operand value(this, node->child1());
2479         FPRTemporary result(this);
2480         
2481         GPRReg valueGPR = value.gpr();
2482         FPRReg resultFPR = result.fpr();
2483
2484         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2485         
2486         doubleResult(resultFPR, node);
2487         return;
2488     }
2489 #endif // USE(JSVALUE64)
2490         
2491     default:
2492         RELEASE_ASSERT_NOT_REACHED();
2493         return;
2494     }
2495 }
2496
2497 void SpeculativeJIT::compileValueRep(Node* node)
2498 {
2499     switch (node->child1().useKind()) {
2500     case DoubleRepUse: {
2501         SpeculateDoubleOperand value(this, node->child1());
2502         JSValueRegsTemporary result(this);
2503         
2504         FPRReg valueFPR = value.fpr();
2505         JSValueRegs resultRegs = result.regs();
2506         
2507         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2508         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2509         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2510         // local was purified.
2511         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2512             m_jit.purifyNaN(valueFPR);
2513
2514         boxDouble(valueFPR, resultRegs);
2515         
2516         jsValueResult(resultRegs, node);
2517         return;
2518     }
2519         
2520 #if USE(JSVALUE64)
2521     case Int52RepUse: {
2522         SpeculateStrictInt52Operand value(this, node->child1());
2523         GPRTemporary result(this);
2524         
2525         GPRReg valueGPR = value.gpr();
2526         GPRReg resultGPR = result.gpr();
2527         
2528         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2529         
2530         jsValueResult(resultGPR, node);
2531         return;
2532     }
2533 #endif // USE(JSVALUE64)
2534         
2535     default:
2536         RELEASE_ASSERT_NOT_REACHED();
2537         return;
2538     }
2539 }
2540
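     // Clamps a double to the byte range [0, 255]. Adding 0.5 first makes the caller's subsequent
     // truncation round to nearest, and the !(d > 0) test also sends NaN to 0.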
2541 static double clampDoubleToByte(double d)
2542 {
2543     d += 0.5;
2544     if (!(d > 0))
2545         d = 0;
2546     else if (d > 255)
2547         d = 255;
2548     return d;
2549 }
2550
2551 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2552 {
2553     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2554     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2555     jit.xorPtr(result, result);
2556     MacroAssembler::Jump clamped = jit.jump();
2557     tooBig.link(&jit);
2558     jit.move(JITCompiler::TrustedImm32(255), result);
2559     clamped.link(&jit);
2560     inBounds.link(&jit);
2561 }
2562
2563 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2564 {
2565     // Unordered compare so we pick up NaN
2566     static const double zero = 0;
2567     static const double byteMax = 255;
2568     static const double half = 0.5;
2569     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2570     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2571     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2572     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2573     
2574     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2575     // FIXME: This should probably just use a floating point round!
2576     // https://bugs.webkit.org/show_bug.cgi?id=72054
2577     jit.addDouble(source, scratch);
2578     jit.truncateDoubleToInt32(scratch, result);   
2579     MacroAssembler::Jump truncatedInt = jit.jump();
2580     
2581     tooSmall.link(&jit);
2582     jit.xorPtr(result, result);
2583     MacroAssembler::Jump zeroed = jit.jump();
2584     
2585     tooBig.link(&jit);
2586     jit.move(JITCompiler::TrustedImm32(255), result);
2587     
2588     truncatedInt.link(&jit);
2589     zeroed.link(&jit);
2590
2591 }
2592
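     // Returns an unset Jump when no runtime bounds check is needed: PutByValAlias has already been
     // checked, and a constant index into a view of known length can be proven in range at compile
     // time.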
2593 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2594 {
2595     if (node->op() == PutByValAlias)
2596         return JITCompiler::Jump();
2597     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2598         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2599     if (view) {
2600         uint32_t length = view->length();
2601         Node* indexNode = m_jit.graph().child(node, 1).node();
2602         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2603             return JITCompiler::Jump();
2604         return m_jit.branch32(
2605             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2606     }
2607     return m_jit.branch32(
2608         MacroAssembler::AboveOrEqual, indexGPR,
2609         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2610 }
2611
2612 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2613 {
2614     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2615     if (!jump.isSet())
2616         return;
2617     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2618 }
2619
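     // For in-bounds array modes an out-of-bounds index is simply a speculation failure. Otherwise the
     // out-of-bounds access falls through with nothing stored, except that a wasteful typed array whose
     // vector is null (the neutered case) still fails its speculation rather than being treated as a
     // harmless miss.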
2620 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2621 {
2622     JITCompiler::Jump done;
2623     if (outOfBounds.isSet()) {
2624         done = m_jit.jump();
2625         if (node->arrayMode().isInBounds())
2626             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2627         else {
2628             outOfBounds.link(&m_jit);
2629
2630             JITCompiler::Jump notWasteful = m_jit.branch32(
2631                 MacroAssembler::NotEqual,
2632                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2633                 TrustedImm32(WastefulTypedArray));
2634
2635             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2636                 MacroAssembler::Zero,
2637                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2638             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2639             notWasteful.link(&m_jit);
2640         }
2641     }
2642     return done;
2643 }
2644
2645 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2646 {
2647     ASSERT(isInt(type));
2648     
2649     SpeculateCellOperand base(this, node->child1());
2650     SpeculateStrictInt32Operand property(this, node->child2());
2651     StorageOperand storage(this, node->child3());
2652
2653     GPRReg baseReg = base.gpr();
2654     GPRReg propertyReg = property.gpr();
2655     GPRReg storageReg = storage.gpr();
2656
2657     GPRTemporary result(this);
2658     GPRReg resultReg = result.gpr();
2659
2660     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2661
2662     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2663     switch (elementSize(type)) {
2664     case 1:
2665         if (isSigned(type))
2666             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2667         else
2668             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2669         break;
2670     case 2:
2671         if (isSigned(type))
2672             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2673         else
2674             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2675         break;
2676     case 4:
2677         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2678         break;
2679     default:
2680         CRASH();
2681     }
2682     if (elementSize(type) < 4 || isSigned(type)) {
2683         int32Result(resultReg, node);
2684         return;
2685     }
2686     
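         // Unsigned 32-bit loads may not fit in an int32: either speculate that the value is
         // non-negative, widen to Int52 on 64-bit, or fall back to a double, adding 2^32 when the
         // sign bit is set.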
2687     ASSERT(elementSize(type) == 4 && !isSigned(type));
2688     if (node->shouldSpeculateInt32()) {
2689         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2690         int32Result(resultReg, node);
2691         return;
2692     }
2693     
2694 #if USE(JSVALUE64)
2695     if (node->shouldSpeculateAnyInt()) {
2696         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2697         strictInt52Result(resultReg, node);
2698         return;
2699     }
2700 #endif
2701     
2702     FPRTemporary fresult(this);
2703     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2704     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2705     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2706     positive.link(&m_jit);
2707     doubleResult(fresult.fpr(), node);
2708 }
2709
2710 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2711 {
2712     ASSERT(isInt(type));
2713     
2714     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2715     GPRReg storageReg = storage.gpr();
2716     
2717     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2718     
2719     GPRTemporary value;
2720     GPRReg valueGPR = InvalidGPRReg;
2721     
2722     if (valueUse->isConstant()) {
2723         JSValue jsValue = valueUse->asJSValue();
2724         if (!jsValue.isNumber()) {
2725             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2726             noResult(node);
2727             return;
2728         }
2729         double d = jsValue.asNumber();
2730         if (isClamped(type)) {
2731             ASSERT(elementSize(type) == 1);
2732             d = clampDoubleToByte(d);
2733         }
2734         GPRTemporary scratch(this);
2735         GPRReg scratchReg = scratch.gpr();
2736         m_jit.move(Imm32(toInt32(d)), scratchReg);
2737         value.adopt(scratch);
2738         valueGPR = scratchReg;
2739     } else {
2740         switch (valueUse.useKind()) {
2741         case Int32Use: {
2742             SpeculateInt32Operand valueOp(this, valueUse);
2743             GPRTemporary scratch(this);
2744             GPRReg scratchReg = scratch.gpr();
2745             m_jit.move(valueOp.gpr(), scratchReg);
2746             if (isClamped(type)) {
2747                 ASSERT(elementSize(type) == 1);
2748                 compileClampIntegerToByte(m_jit, scratchReg);
2749             }
2750             value.adopt(scratch);
2751             valueGPR = scratchReg;
2752             break;
2753         }
2754             
2755 #if USE(JSVALUE64)
2756         case Int52RepUse: {
2757             SpeculateStrictInt52Operand valueOp(this, valueUse);
2758             GPRTemporary scratch(this);
2759             GPRReg scratchReg = scratch.gpr();
2760             m_jit.move(valueOp.gpr(), scratchReg);
2761             if (isClamped(type)) {
2762                 ASSERT(elementSize(type) == 1);
2763                 MacroAssembler::Jump inBounds = m_jit.branch64(
2764                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2765                 MacroAssembler::Jump tooBig = m_jit.branch64(
2766                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2767                 m_jit.move(TrustedImm32(0), scratchReg);
2768                 MacroAssembler::Jump clamped = m_jit.jump();
2769                 tooBig.link(&m_jit);
2770                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2771                 clamped.link(&m_jit);
2772                 inBounds.link(&m_jit);
2773             }
2774             value.adopt(scratch);
2775             valueGPR = scratchReg;
2776             break;
2777         }
2778 #endif // USE(JSVALUE64)
2779             
2780         case DoubleRepUse: {
2781             if (isClamped(type)) {
2782                 ASSERT(elementSize(type) == 1);
2783                 SpeculateDoubleOperand valueOp(this, valueUse);
2784                 GPRTemporary result(this);
2785                 FPRTemporary floatScratch(this);
2786                 FPRReg fpr = valueOp.fpr();
2787                 GPRReg gpr = result.gpr();
2788                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2789                 value.adopt(result);
2790                 valueGPR = gpr;
2791             } else {
2792                 SpeculateDoubleOperand valueOp(this, valueUse);
2793                 GPRTemporary result(this);
2794                 FPRReg fpr = valueOp.fpr();
2795                 GPRReg gpr = result.gpr();
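                     // Unclamped double-to-int store: NaN is stored as 0, the common case uses a
                     // fast truncation, and values the fast truncation rejects take a slow path
                     // call to operationToInt32.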
2796                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2797                 m_jit.xorPtr(gpr, gpr);
2798                 MacroAssembler::Jump fixed = m_jit.jump();
2799                 notNaN.link(&m_jit);
2800                 
2801                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2802                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2803                 
2804                 addSlowPathGenerator(slowPathCall(failed, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2805                 
2806                 fixed.link(&m_jit);
2807                 value.adopt(result);
2808                 valueGPR = gpr;
2809             }
2810             break;
2811         }
2812             
2813         default:
2814             RELEASE_ASSERT_NOT_REACHED();
2815             break;
2816         }
2817     }
2818     
2819     ASSERT_UNUSED(valueGPR, valueGPR != property);
2820     ASSERT(valueGPR != base);
2821     ASSERT(valueGPR != storageReg);
2822     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2823
2824     switch (elementSize(type)) {
2825     case 1:
2826         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2827         break;
2828     case 2:
2829         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2830         break;
2831     case 4:
2832         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2833         break;
2834     default:
2835         CRASH();
2836     }
2837
2838     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2839     if (done.isSet())
2840         done.link(&m_jit);
2841     noResult(node);
2842 }
2843
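     // Loads a Float32Array or Float64Array element and produces a double: 32-bit elements are
     // widened with convertFloatToDouble, 64-bit elements are loaded directly.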
2844 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2845 {
2846     ASSERT(isFloat(type));
2847     
2848     SpeculateCellOperand base(this, node->child1());
2849     SpeculateStrictInt32Operand property(this, node->child2());
2850     StorageOperand storage(this, node->child3());
2851
2852     GPRReg baseReg = base.gpr();
2853     GPRReg propertyReg = property.gpr();
2854     GPRReg storageReg = storage.gpr();
2855
2856     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2857
2858     FPRTemporary result(this);
2859     FPRReg resultReg = result.fpr();
2860     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2861     switch (elementSize(type)) {
2862     case 4:
2863         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2864         m_jit.convertFloatToDouble(resultReg, resultReg);
2865         break;
2866     case 8: {
2867         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2868         break;
2869     }
2870     default:
2871         RELEASE_ASSERT_NOT_REACHED();
2872     }
2873     
2874     doubleResult(resultReg, node);
2875 }
2876
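     // Stores a double into a Float32Array or Float64Array element; for 32-bit elements the value
     // is narrowed to a float in a scratch FPR before the store.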
2877 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2878 {
2879     ASSERT(isFloat(type));
2880     
2881     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2882     GPRReg storageReg = storage.gpr();
2883     
2884     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2885     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2886
2887     SpeculateDoubleOperand valueOp(this, valueUse);
2888     FPRTemporary scratch(this);
2889     FPRReg valueFPR = valueOp.fpr();
2890     FPRReg scratchFPR = scratch.fpr();
2891
2892     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2893     
2894     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2895     
2896     switch (elementSize(type)) {
2897     case 4: {
2898         m_jit.moveDouble(valueFPR, scratchFPR);
2899         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2900         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2901         break;
2902     }
2903     case 8:
2904         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2905         break;
2906     default:
2907         RELEASE_ASSERT_NOT_REACHED();
2908     }
2909
2910     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2911     if (done.isSet())
2912         done.link(&m_jit);
2913     noResult(node);
2914 }
2915
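     // Emits the prototype-chain walk used by instanceof once the prototype is known to be an
     // object. Roughly:
     //
     //     v = value;
     //     do {
     //         if (v is a ProxyObject)
     //             return operationDefaultHasInstance(value, prototype);
     //         v = v's Structure's prototype;
     //         if (v == prototype)
     //             return true;
     //     } while (v is a cell);
     //     return false;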
2916 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2917 {
2918     // Check that prototype is an object.
2919     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2920     
2921     // Initialize scratchReg with the value being checked.
2922     m_jit.move(valueReg, scratchReg);
2923     
2924     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2925     MacroAssembler::Label loop(&m_jit);
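         // If the object we are looking at is a proxy, its prototype may come from a handler trap
         // and cannot be read off the Structure, so fall back to the generic helper.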
2926     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2927         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2928     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2929     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2930     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2931 #if USE(JSVALUE64)
2932     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2933 #else
2934     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2935 #endif
2936     
2937     // No match - result is false.
2938 #if USE(JSVALUE64)
2939     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2940 #else
2941     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2942 #endif
2943     MacroAssembler::JumpList doneJumps; 
2944     doneJumps.append(m_jit.jump());
2945
2946     performDefaultHasInstance.link(&m_jit);
2947     silentSpillAllRegisters(scratchReg);
2948     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2949     silentFillAllRegisters(scratchReg);
2950     m_jit.exceptionCheck();
2951 #if USE(JSVALUE64)
2952     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2953 #endif
2954     doneJumps.append(m_jit.jump());
2955     
2956     isInstance.link(&m_jit);
2957 #if USE(JSVALUE64)
2958     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2959 #else
2960     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2961 #endif
2962     
2963     doneJumps.link(&m_jit);
2964 }
2965
2966 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2967 {
2968     SpeculateCellOperand base(this, node->child1());
2969
2970     GPRReg baseGPR = base.gpr();
2971
2972     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2973
2974     noResult(node);
2975 }
2976
2977 void SpeculativeJIT::compileInstanceOf(Node* node)
2978 {
2979     if (node->child1().useKind() == UntypedUse) {
2980         // It might not be a cell. Speculate less aggressively.
2981         // Or: it might only be used once (i.e. by us), so we get zero benefit
2982         // from speculating any more aggressively than we absolutely need to.
2983         
2984         JSValueOperand value(this, node->child1());
2985         SpeculateCellOperand prototype(this, node->child2());
2986         GPRTemporary scratch(this);
2987         GPRTemporary scratch2(this);
2988         
2989         GPRReg prototypeReg = prototype.gpr();
2990         GPRReg scratchReg = scratch.gpr();
2991         GPRReg scratch2Reg = scratch2.gpr();
2992         
2993         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2994         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2995         moveFalseTo(scratchReg);
2996
2997         MacroAssembler::Jump done = m_jit.jump();
2998         
2999         isCell.link(&m_jit);
3000         
3001         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3002         
3003         done.link(&m_jit);
3004
3005         blessedBooleanResult(scratchReg, node);
3006         return;
3007     }
3008     
3009     SpeculateCellOperand value(this, node->child1());
3010     SpeculateCellOperand prototype(this, node->child2());
3011     
3012     GPRTemporary scratch(this);
3013     GPRTemporary scratch2(this);
3014     
3015     GPRReg valueReg = value.gpr();
3016     GPRReg prototypeReg = prototype.gpr();
3017     GPRReg scratchReg = scratch.gpr();
3018     GPRReg scratch2Reg = scratch2.gpr();
3019     
3020     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3021
3022     blessedBooleanResult(scratchReg, node);
3023 }
3024
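     // Shared lowering for the untyped bitwise ops (BitAnd/BitOr/BitXor, and BitLShift via
     // compileShiftOp). If either operand is known not to be a number we just call the slow-path
     // operation; otherwise we emit the snippet generator's inline fast path and keep the call as
     // its slow path, materializing any constant operand only on that slow path.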
3025 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3026 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3027 {
3028     Edge& leftChild = node->child1();
3029     Edge& rightChild = node->child2();
3030
3031     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3032         JSValueOperand left(this, leftChild);
3033         JSValueOperand right(this, rightChild);
3034         JSValueRegs leftRegs = left.jsValueRegs();
3035         JSValueRegs rightRegs = right.jsValueRegs();
3036 #if USE(JSVALUE64)
3037         GPRTemporary result(this);
3038         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3039 #else
3040         GPRTemporary resultTag(this);
3041         GPRTemporary resultPayload(this);
3042         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3043 #endif
3044         flushRegisters();
3045         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3046         m_jit.exceptionCheck();
3047
3048         jsValueResult(resultRegs, node);
3049         return;
3050     }
3051
3052     Optional<JSValueOperand> left;
3053     Optional<JSValueOperand> right;
3054
3055     JSValueRegs leftRegs;
3056     JSValueRegs rightRegs;
3057
3058 #if USE(JSVALUE64)
3059     GPRTemporary result(this);
3060     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3061     GPRTemporary scratch(this);
3062     GPRReg scratchGPR = scratch.gpr();
3063 #else
3064     GPRTemporary resultTag(this);
3065     GPRTemporary resultPayload(this);
3066     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3067     GPRReg scratchGPR = resultTag.gpr();
3068 #endif
3069
3070     SnippetOperand leftOperand;
3071     SnippetOperand rightOperand;
3072
3073     // The snippet generator does not support both operands being constant. If the left
3074     // operand is already const, we'll ignore the right operand's constness.
3075     if (leftChild->isInt32Constant())
3076         leftOperand.setConstInt32(leftChild->asInt32());
3077     else if (rightChild->isInt32Constant())
3078         rightOperand.setConstInt32(rightChild->asInt32());
3079
3080     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3081
3082     if (!leftOperand.isConst()) {
3083         left = JSValueOperand(this, leftChild);
3084         leftRegs = left->jsValueRegs();
3085     }
3086     if (!rightOperand.isConst()) {
3087         right = JSValueOperand(this, rightChild);
3088         rightRegs = right->jsValueRegs();
3089     }
3090
3091     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3092     gen.generateFastPath(m_jit);
3093
3094     ASSERT(gen.didEmitFastPath());
3095     gen.endJumpList().append(m_jit.jump());
3096
3097     gen.slowPathJumpList().link(&m_jit);
3098     silentSpillAllRegisters(resultRegs);
3099
3100     if (leftOperand.isConst()) {
3101         leftRegs = resultRegs;
3102         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3103     } else if (rightOperand.isConst()) {
3104         rightRegs = resultRegs;
3105         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3106     }
3107
3108     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3109
3110     silentFillAllRegisters(resultRegs);
3111     m_jit.exceptionCheck();
3112
3113     gen.endJumpList().link(&m_jit);
3114     jsValueResult(resultRegs, node);
3115 }
3116
3117 void SpeculativeJIT::compileBitwiseOp(Node* node)
3118 {
3119     NodeType op = node->op();
3120     Edge& leftChild = node->child1();
3121     Edge& rightChild = node->child2();
3122
3123     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3124         switch (op) {
3125         case BitAnd:
3126             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3127             return;
3128         case BitOr:
3129             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3130             return;
3131         case BitXor:
3132             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3133             return;
3134         default:
3135             RELEASE_ASSERT_NOT_REACHED();
3136         }
3137     }
3138
3139     if (leftChild->isInt32Constant()) {
3140         SpeculateInt32Operand op2(this, rightChild);
3141         GPRTemporary result(this, Reuse, op2);
3142
3143         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3144
3145         int32Result(result.gpr(), node);
3146
3147     } else if (rightChild->isInt32Constant()) {
3148         SpeculateInt32Operand op1(this, leftChild);
3149         GPRTemporary result(this, Reuse, op1);
3150
3151         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3152
3153         int32Result(result.gpr(), node);
3154
3155     } else {
3156         SpeculateInt32Operand op1(this, leftChild);
3157         SpeculateInt32Operand op2(this, rightChild);
3158         GPRTemporary result(this, Reuse, op1, op2);
3159         
3160         GPRReg reg1 = op1.gpr();
3161         GPRReg reg2 = op2.gpr();
3162         bitOp(op, reg1, reg2, result.gpr());
3163         
3164         int32Result(result.gpr(), node);
3165     }
3166 }
3167
3168 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3169 {
3170     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3171         ? operationValueBitRShift : operationValueBitURShift;
3172     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3173         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3174
3175     Edge& leftChild = node->child1();
3176     Edge& rightChild = node->child2();
3177
3178     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3179         JSValueOperand left(this, leftChild);
3180         JSValueOperand right(this, rightChild);
3181         JSValueRegs leftRegs = left.jsValueRegs();
3182         JSValueRegs rightRegs = right.jsValueRegs();
3183 #if USE(JSVALUE64)
3184         GPRTemporary result(this);
3185         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3186 #else
3187         GPRTemporary resultTag(this);
3188         GPRTemporary resultPayload(this);
3189         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3190 #endif
3191         flushRegisters();
3192         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3193         m_jit.exceptionCheck();
3194
3195         jsValueResult(resultRegs, node);
3196         return;
3197     }
3198
3199     Optional<JSValueOperand> left;
3200     Optional<JSValueOperand> right;
3201
3202     JSValueRegs leftRegs;
3203     JSValueRegs rightRegs;
3204
3205     FPRTemporary leftNumber(this);
3206     FPRReg leftFPR = leftNumber.fpr();
3207
3208 #if USE(JSVALUE64)
3209     GPRTemporary result(this);
3210     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3211     GPRTemporary scratch(this);
3212     GPRReg scratchGPR = scratch.gpr();
3213     FPRReg scratchFPR = InvalidFPRReg;
3214 #else
3215     GPRTemporary resultTag(this);
3216     GPRTemporary resultPayload(this);
3217     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3218     GPRReg scratchGPR = resultTag.gpr();
3219     FPRTemporary fprScratch(this);
3220     FPRReg scratchFPR = fprScratch.fpr();
3221 #endif
3222
3223     SnippetOperand leftOperand;
3224     SnippetOperand rightOperand;
3225
3226     // The snippet generator does not support both operands being constant. If the left
3227     // operand is already const, we'll ignore the right operand's constness.
3228     if (leftChild->isInt32Constant())
3229         leftOperand.setConstInt32(leftChild->asInt32());
3230     else if (rightChild->isInt32Constant())
3231         rightOperand.setConstInt32(rightChild->asInt32());
3232
3233     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3234
3235     if (!leftOperand.isConst()) {
3236         left = JSValueOperand(this, leftChild);
3237         leftRegs = left->jsValueRegs();
3238     }
3239     if (!rightOperand.isConst()) {
3240         right = JSValueOperand(this, rightChild);
3241         rightRegs = right->jsValueRegs();
3242     }
3243
3244     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3245         leftFPR, scratchGPR, scratchFPR, shiftType);
3246     gen.generateFastPath(m_jit);
3247
3248     ASSERT(gen.didEmitFastPath());
3249     gen.endJumpList().append(m_jit.jump());
3250
3251     gen.slowPathJumpList().link(&m_jit);
3252     silentSpillAllRegisters(resultRegs);
3253
3254     if (leftOperand.isConst()) {
3255         leftRegs = resultRegs;
3256         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3257     } else if (rightOperand.isConst()) {
3258         rightRegs = resultRegs;
3259         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3260     }
3261
3262     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3263
3264     silentFillAllRegisters(resultRegs);
3265     m_jit.exceptionCheck();
3266
3267     gen.endJumpList().link(&m_jit);
3268     jsValueResult(resultRegs, node);
3269     return;
3270 }
3271
3272 void SpeculativeJIT::compileShiftOp(Node* node)
3273 {
3274     NodeType op = node->op();
3275     Edge& leftChild = node->child1();
3276     Edge& rightChild = node->child2();
3277
3278     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3279         switch (op) {
3280         case BitLShift:
3281             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3282             return;
3283         case BitRShift:
3284         case BitURShift:
3285             emitUntypedRightShiftBitOp(node);
3286             return;
3287         default:
3288             RELEASE_ASSERT_NOT_REACHED();
3289         }
3290     }
3291
3292     if (rightChild->isInt32Constant()) {
3293         SpeculateInt32Operand op1(this, leftChild);
3294         GPRTemporary result(this, Reuse, op1);
3295
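             // JS shift counts are masked to their low 5 bits (rnum & 0x1F), so the mask can be
             // folded into the constant here.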
3296         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3297
3298         int32Result(result.gpr(), node);
3299     } else {
3300         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3301         SpeculateInt32Operand op1(this, leftChild);
3302         SpeculateInt32Operand op2(this, rightChild);
3303         GPRTemporary result(this, Reuse, op1);
3304
3305         GPRReg reg1 = op1.gpr();
3306         GPRReg reg2 = op2.gpr();
3307         shiftOp(op, reg1, reg2, result.gpr());
3308
3309         int32Result(result.gpr(), node);
3310     }
3311 }
3312
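     // ValueAdd on possibly-numeric operands goes through a JITAddIC: the math IC tries to emit an
     // inline fast path guided by the baseline ArithProfile, and its out-of-line slow path calls
     // operationValueAdd / operationValueAddOptimize (the latter can repatch the IC). If inline
     // generation fails, we simply flush registers and call operationValueAdd. Operands known not
     // to be numbers skip all of this and call operationValueAddNotNumber.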
3313 void SpeculativeJIT::compileValueAdd(Node* node)
3314 {
3315     Edge& leftChild = node->child1();
3316     Edge& rightChild = node->child2();
3317
3318     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3319         JSValueOperand left(this, leftChild);
3320         JSValueOperand right(this, rightChild);
3321         JSValueRegs leftRegs = left.jsValueRegs();
3322         JSValueRegs rightRegs = right.jsValueRegs();
3323 #if USE(JSVALUE64)
3324         GPRTemporary result(this);
3325         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3326 #else
3327         GPRTemporary resultTag(this);
3328         GPRTemporary resultPayload(this);
3329         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3330 #endif
3331         flushRegisters();
3332         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3333         m_jit.exceptionCheck();
3334     
3335         jsValueResult(resultRegs, node);
3336         return;
3337     }
3338
3339     Optional<JSValueOperand> left;
3340     Optional<JSValueOperand> right;
3341
3342     JSValueRegs leftRegs;
3343     JSValueRegs rightRegs;
3344
3345     FPRTemporary leftNumber(this);
3346     FPRTemporary rightNumber(this);
3347     FPRReg leftFPR = leftNumber.fpr();
3348     FPRReg rightFPR = rightNumber.fpr();
3349
3350 #if USE(JSVALUE64)
3351     GPRTemporary result(this);
3352     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3353     GPRTemporary scratch(this);
3354     GPRReg scratchGPR = scratch.gpr();
3355     FPRReg scratchFPR = InvalidFPRReg;
3356 #else
3357     GPRTemporary resultTag(this);
3358     GPRTemporary resultPayload(this);
3359     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3360     GPRReg scratchGPR = resultTag.gpr();
3361     FPRTemporary fprScratch(this);
3362     FPRReg scratchFPR = fprScratch.fpr();
3363 #endif
3364
3365     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3366     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3367
3368     // The snippet generator does not support both operands being constant. If the left
3369     // operand is already const, we'll ignore the right operand's constness.
3370     if (leftChild->isInt32Constant())
3371         leftOperand.setConstInt32(leftChild->asInt32());
3372     else if (rightChild->isInt32Constant())
3373         rightOperand.setConstInt32(rightChild->asInt32());
3374
3375     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3376
3377     if (!leftOperand.isConst()) {
3378         left = JSValueOperand(this, leftChild);
3379         leftRegs = left->jsValueRegs();
3380     }
3381     if (!rightOperand.isConst()) {
3382         right = JSValueOperand(this, rightChild);
3383         rightRegs = right->jsValueRegs();
3384     }
3385
3386     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC();
3387     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3388     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3389     addIC->m_generator = JITAddGenerator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR, arithProfile);
3390
3391     bool generatedInline = addIC->generateInline(m_jit, *addICGenerationState);
3392
3393     if (generatedInline) {
3394         ASSERT(!addICGenerationState->slowPathJumps.empty());
3395
3396         Vector<SilentRegisterSavePlan> savePlans;
3397         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3398
3399         auto done = m_jit.label();
3400
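             // The slow path is emitted out of line. The lambda captures the silent spill plans so
             // it can spill and refill live registers around the call, materializes any constant
             // operand, calls the (possibly repatching) add operation, and jumps back to `done` on
             // the fast path.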
3401         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3402             addICGenerationState->slowPathJumps.link(&m_jit);
3403             addICGenerationState->slowPathStart = m_jit.label();
3404
3405             silentSpill(savePlans);
3406
3407             auto innerLeftRegs = leftRegs;
3408             auto innerRightRegs = rightRegs;
3409             if (leftOperand.isConst()) {
3410                 innerLeftRegs = resultRegs;
3411                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3412             } else if (rightOperand.isConst()) {
3413                 innerRightRegs = resultRegs;
3414                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3415             }
3416
3417             if (addICGenerationState->shouldSlowPathRepatch)
3418                 addICGenerationState->slowPathCall = callOperation(operationValueAddOptimize, resultRegs, innerLeftRegs, innerRightRegs, addIC);
3419             else
3420                 addICGenerationState->slowPathCall = callOperation(operationValueAdd, resultRegs, innerLeftRegs, innerRightRegs);
3421
3422             silentFill(savePlans);
3423             m_jit.exceptionCheck();
3424             m_jit.jump().linkTo(done, &m_jit);
3425
3426             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3427                 addIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3428             });
3429         });
3430     } else {
3431         if (leftOperand.isConst()) {
3432             left = JSValueOperand(this, leftChild);
3433             leftRegs = left->jsValueRegs();
3434         } else if (rightOperand.isConst()) {
3435             right = JSValueOperand(this, rightChild);
3436             rightRegs = right->jsValueRegs();
3437         }
3438
3439         flushRegisters();
3440         callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3441         m_jit.exceptionCheck();
3442     }
3443
3444     jsValueResult(resultRegs, node);
3445     return;
3446 }
3447
3448 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3449 {
3450     // We could do something smarter here, but this case is currently super rare and, unless
3451     // Symbol.hasInstance becomes popular, it will likely remain that way.
3452
3453     JSValueOperand value(this, node->child1());
3454     SpeculateCellOperand constructor(this, node->child2());
3455     JSValueOperand hasInstanceValue(this, node->child3());
3456     GPRTemporary result(this);
3457
3458     JSValueRegs valueRegs = value.jsValueRegs();
3459     GPRReg constructorGPR = constructor.gpr();
3460     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3461     GPRReg resultGPR = result.gpr();
3462
3463     MacroAssembler::Jump slowCase = m_jit.jump();
3464
3465     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3466
3467     unblessedBooleanResult(resultGPR, node);
3468 }
3469
3470 void SpeculativeJIT::compileIsJSArray(Node* node)
3471 {
3472     JSValueOperand value(this, node->child1());
3473     GPRFlushedCallResult result(this);
3474
3475     JSValueRegs valueRegs = value.jsValueRegs();
3476     GPRReg resultGPR = result.gpr();
3477
3478     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3479
3480     m_jit.compare8(JITCompiler::Equal,
3481         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3482         TrustedImm32(ArrayType),
3483         resultGPR);
3484     blessBoolean(resultGPR);
3485     JITCompiler::Jump done = m_jit.jump();
3486
3487     isNotCell.link(&m_jit);
3488     moveFalseTo(resultGPR);
3489
3490     done.link(&m_jit);
3491     blessedBooleanResult(resultGPR, node);
3492 }
3493
3494 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3495 {
3496     JSValueOperand value(this, node->child1());
3497     GPRFlushedCallResult result(this);
3498
3499     JSValueRegs valueRegs = value.jsValueRegs();
3500     GPRReg resultGPR = result.gpr();
3501
3502     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3503
3504     m_jit.compare8(JITCompiler::Equal,
3505         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3506         TrustedImm32(RegExpObjectType),
3507         resultGPR);
3508     blessBoolean(resultGPR);
3509     JITCompiler::Jump done = m_jit.jump();
3510
3511     isNotCell.link(&m_jit);
3512     moveFalseTo(resultGPR);
3513
3514     done.link(&m_jit);
3515     blessedBooleanResult(resultGPR, node);
3516 }
3517
3518 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3519 {
3520     JSValueOperand value(this, node->child1());
3521 #if USE(JSVALUE64)
3522     GPRTemporary result(this, Reuse, value);
3523 #else
3524     GPRTemporary result(this, Reuse, value, PayloadWord);
3525 #endif
3526
3527     JSValueRegs valueRegs = value.jsValueRegs();
3528     GPRReg resultGPR = result.gpr();
3529
3530     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3531
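         // The typed array view JSTypes occupy a contiguous range, so a single unsigned compare of
         // (type - Int8ArrayType) against (Float64ArrayType - Int8ArrayType) covers every view type.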
3532     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3533     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3534     m_jit.compare32(JITCompiler::BelowOrEqual,
3535         resultGPR,
3536         TrustedImm32(Float64ArrayType - Int8ArrayType),
3537         resultGPR);
3538     blessBoolean(resultGPR);
3539     JITCompiler::Jump done = m_jit.jump();
3540
3541     isNotCell.link(&m_jit);
3542     moveFalseTo(resultGPR);
3543
3544     done.link(&m_jit);
3545     blessedBooleanResult(resultGPR, node);
3546 }
3547
3548 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3549 {
3550     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3551     JSValueOperand value(this, node->child1());
3552 #if USE(JSVALUE64)
3553     GPRTemporary result(this, Reuse, value);
3554 #else
3555     GPRTemporary result(this, Reuse, value, PayloadWord);
3556 #endif
3557
3558     JSValueRegs valueRegs = value.jsValueRegs();
3559     GPRReg resultGPR = result.gpr();
3560
3561     MacroAssembler::JumpList slowCases;
3562     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3563     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3564     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3565
3566     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3567     cellResult(resultGPR, node);
3568 }
3569
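     // Int32 adds emit an overflow check unless the arith mode says the check is unnecessary; when
     // the result reuses an operand register we also record a SpeculationRecovery so OSR exit can
     // reconstruct the original operand. Int52 adds only need the checked form when an operand
     // could actually require the full 52 bits, and DoubleRep adds never need a check.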
3570 void SpeculativeJIT::compileArithAdd(Node* node)
3571 {
3572     switch (node->binaryUseKind()) {
3573     case Int32Use: {
3574         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3575
3576         if (node->child2()->isInt32Constant()) {
3577             SpeculateInt32Operand op1(this, node->child1());
3578             GPRTemporary result(this, Reuse, op1);
3579
3580             GPRReg gpr1 = op1.gpr();
3581             int32_t imm2 = node->child2()->asInt32();
3582             GPRReg gprResult = result.gpr();
3583
3584             if (!shouldCheckOverflow(node->arithMode())) {
3585                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3586                 int32Result(gprResult, node);
3587                 return;
3588             }
3589
3590             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3591             if (gpr1 == gprResult) {
3592                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3593                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3594             } else
3595                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3596
3597             int32Result(gprResult, node);
3598             return;
3599         }
3600                 
3601         SpeculateInt32Operand op1(this, node->child1());
3602         SpeculateInt32Operand op2(this, node->child2());
3603         GPRTemporary result(this, Reuse, op1, op2);
3604
3605         GPRReg gpr1 = op1.gpr();
3606         GPRReg gpr2 = op2.gpr();
3607         GPRReg gprResult = result.gpr();
3608
3609         if (!shouldCheckOverflow(node->arithMode()))
3610             m_jit.add32(gpr1, gpr2, gprResult);
3611         else {
3612             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3613                 
3614             if (gpr1 == gprResult)
3615                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3616             else if (gpr2 == gprResult)
3617                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3618             else
3619                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3620         }
3621
3622         int32Result(gprResult, node);
3623         return;
3624     }
3625         
3626 #if USE(JSVALUE64)
3627     case Int52RepUse: {
3628         ASSERT(shouldCheckOverflow(node->arithMode()));
3629         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3630
3631         // Will we need an overflow check? If we can prove that neither input can be
3632         // Int52 then the overflow check will not be necessary.
3633         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3634             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3635             SpeculateWhicheverInt52Operand op1(this, node->child1());
3636             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3637             GPRTemporary result(this, Reuse, op1);
3638             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3639             int52Result(result.gpr(), node, op1.format());
3640             return;
3641         }
3642         
3643         SpeculateInt52Operand op1(this, node->child1());
3644         SpeculateInt52Operand op2(this, node->child2());
3645         GPRTemporary result(this);
3646         m_jit.move(op1.gpr(), result.gpr());
3647         speculationCheck(
3648             Int52Overflow, JSValueRegs(), 0,
3649             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3650         int52Result(result.gpr(), node);
3651         return;
3652     }
3653 #endif // USE(JSVALUE64)
3654     
3655     case DoubleRepUse: {
3656         SpeculateDoubleOperand op1(this, node->child1());
3657         SpeculateDoubleOperand op2(this, node->child2());
3658         FPRTemporary result(this, op1, op2);
3659
3660         FPRReg reg1 = op1.fpr();
3661         FPRReg reg2 = op2.fpr();
3662         m_jit.addDouble(reg1, reg2, result.fpr());
3663
3664         doubleResult(result.fpr(), node);
3665         return;
3666     }
3667         
3668     default:
3669         RELEASE_ASSERT_NOT_REACHED();
3670         break;
3671     }
3672 }
3673
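     // Allocates a JSRopeString for two or three string fibers inline: grab a cell from the heap
     // allocator for destructible objects of this size, store the fibers, derive the flags and
     // length from the fibers, and fall back to operationMakeRope2/operationMakeRope3 when inline
     // allocation fails.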
3674 void SpeculativeJIT::compileMakeRope(Node* node)
3675 {
3676     ASSERT(node->child1().useKind() == KnownStringUse);
3677     ASSERT(node->child2().useKind() == KnownStringUse);
3678     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3679     
3680     SpeculateCellOperand op1(this, node->child1());
3681     SpeculateCellOperand op2(this, node->child2());
3682     SpeculateCellOperand op3(this, node->child3());
3683     GPRTemporary result(this);
3684     GPRTemporary allocator(this);
3685     GPRTemporary scratch(this);
3686     
3687     GPRReg opGPRs[3];
3688     unsigned numOpGPRs;
3689     opGPRs[0] = op1.gpr();
3690     opGPRs[1] = op2.gpr();
3691     if (node->child3()) {
3692         opGPRs[2] = op3.gpr();
3693         numOpGPRs = 3;
3694     } else {
3695         opGPRs[2] = InvalidGPRReg;
3696         numOpGPRs = 2;
3697     }
3698     GPRReg resultGPR = result.gpr();
3699     GPRReg allocatorGPR = allocator.gpr();
3700     GPRReg scratchGPR = scratch.gpr();
3701     
3702     JITCompiler::JumpList slowPath;
3703     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3704     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3705     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3706         
3707     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3708     for (unsigned i = 0; i < numOpGPRs; ++i)
3709         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3710     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3711         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
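         // The rope's flags start as fiber 0's flags and are ANDed with each later fiber's, so the
         // rope is marked 8-bit only if every fiber is; its length is the sum of the fiber lengths,
         // with a speculation check if that sum overflows int32.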
3712     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3713     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3714     if (!ASSERT_DISABLED) {
3715         JITCompiler::Jump ok = m_jit.branch32(
3716             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3717         m_jit.abortWithReason(DFGNegativeStringLength);
3718         ok.link(&m_jit);
3719     }
3720     for (unsigned i = 1; i < numOpGPRs; ++i) {
3721         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3722         speculationCheck(
3723             Uncountable, JSValueSource(), nullptr,
3724             m_jit.branchAdd32(
3725                 JITCompiler::Overflow,
3726                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3727     }
3728     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3729     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3730     if (!ASSERT_DISABLED) {
3731         JITCompiler::Jump ok = m_jit.branch32(
3732             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3733         m_jit.abortWithReason(DFGNegativeStringLength);
3734         ok.link(&m_jit);
3735     }
3736     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3737     
3738     switch (numOpGPRs) {
3739     case 2:
3740         addSlowPathGenerator(slowPathCall(
3741             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3742         break;
3743     case 3:
3744         addSlowPathGenerator(slowPathCall(
3745             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3746         break;
3747     default:
3748         RELEASE_ASSERT_NOT_REACHED();
3749         break;
3750     }
3751         
3752     cellResult(resultGPR, node);
3753 }
3754
3755 void SpeculativeJIT::compileArithClz32(Node* node)
3756 {
3757     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3758     SpeculateInt32Operand value(this, node->child1());
3759     GPRTemporary result(this, Reuse, value);
3760     GPRReg valueReg = value.gpr();
3761     GPRReg resultReg = result.gpr();
3762     m_jit.countLeadingZeros32(valueReg, resultReg);
3763     int32Result(resultReg, node);
3764 }
3765
3766 void SpeculativeJIT::compileArithSub(Node* node)
3767 {
3768     switch (node->binaryUseKind()) {
3769     case Int32Use: {
3770         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3771         
3772         if (node->child2()->isInt32Constant()) {
3773             SpeculateInt32Operand op1(this, node->child1());
3774             int32_t imm2 = node->child2()->asInt32();
3775             GPRTemporary result(this);
3776
3777             if (!shouldCheckOverflow(node->arithMode())) {
3778                 m_jit.move(op1.gpr(), result.gpr());
3779                 m_jit.sub32(Imm32(imm2), result.gpr());
3780             } else {
3781                 GPRTemporary scratch(this);
3782                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3783             }
3784
3785             int32Result(result.gpr(), node);
3786             return;
3787         }
3788             
3789         if (node->child1()->isInt32Constant()) {
3790             int32_t imm1 = node->child1()->asInt32();
3791             SpeculateInt32Operand op2(this, node->child2());
3792             GPRTemporary result(this);
3793                 
3794             m_jit.move(Imm32(imm1), result.gpr());
3795             if (!shouldCheckOverflow(node->arithMode()))
3796                 m_jit.sub32(op2.gpr(), result.gpr());
3797             else
3798                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3799                 
3800             int32Result(result.gpr(), node);
3801             return;
3802         }
3803             
3804         SpeculateInt32Operand op1(this, node->child1());
3805         SpeculateInt32Operand op2(this, node->child2());
3806         GPRTemporary result(this);
3807
3808         if (!shouldCheckOverflow(node->arithMode())) {
3809             m_jit.move(op1.gpr(), result.gpr());
3810             m_jit.sub32(op2.gpr(), result.gpr());
3811         } else
3812             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3813
3814         int32Result(result.gpr(), node);
3815         return;
3816     }
3817         
3818 #if USE(JSVALUE64)
3819     case Int52RepUse: {
3820         ASSERT(shouldCheckOverflow(node->arithMode()));
3821         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3822
3823         // Will we need an overflow check? If we can prove that neither input can be
3824         // Int52 then the overflow check will not be necessary.
3825         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3826             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3827             SpeculateWhicheverInt52Operand op1(this, node->child1());
3828             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3829             GPRTemporary result(this, Reuse, op1);
3830             m_jit.move(op1.gpr(), result.gpr());
3831             m_jit.sub64(op2.gpr(), result.gpr());
3832             int52Result(result.gpr(), node, op1.format());
3833             return;
3834         }
3835         
3836         SpeculateInt52Operand op1(this, node->child1());
3837         SpeculateInt52Operand op2(this, node->child2());
3838         GPRTemporary result(this);
3839         m_jit.move(op1.gpr(), result.gpr());
3840         speculationCheck(
3841             Int52Overflow, JSValueRegs(), 0,
3842             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3843         int52Result(result.gpr(), node);
3844         return;
3845     }
3846 #endif // USE(JSVALUE64)
3847
3848     case DoubleRepUse: {
3849         SpeculateDoubleOperand op1(this, node->child1());
3850         SpeculateDoubleOperand op2(this, node->child2());
3851         FPRTemporary result(this, op1);
3852
3853         FPRReg reg1 = op1.fpr();
3854         FPRReg reg2 = op2.fpr();
3855         m_jit.subDouble(reg1, reg2, result.fpr());
3856
3857         doubleResult(result.fpr(), node);
3858         return;
3859     }
3860
3861     case UntypedUse: {
3862         Edge& leftChild = node->child1();
3863         Edge& rightChild = node->child2();
3864
3865         JSValueOperand left(this, leftChild);
3866         JSValueOperand right(this, rightChild);
3867
3868         JSValueRegs leftRegs = left.jsValueRegs();
3869         JSValueRegs rightRegs = right.jsValueRegs();
3870
3871         FPRTemporary leftNumber(this);
3872         FPRTemporary rightNumber(this);
3873         FPRReg leftFPR = leftNumber.fpr();
3874         FPRReg rightFPR = rightNumber.fpr();
3875
3876 #if USE(JSVALUE64)
3877         GPRTemporary result(this);
3878         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3879         GPRTemporary scratch(this);
3880         GPRReg scratchGPR = scratch.gpr();
3881         FPRReg scratchFPR = InvalidFPRReg;
3882 #else
3883         GPRTemporary resultTag(this);
3884         GPRTemporary resultPayload(this);
3885         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3886         GPRReg scratchGPR = resultTag.gpr();
3887         FPRTemporary fprScratch(this);
3888         FPRReg scratchFPR = fprScratch.fpr();
3889 #endif
3890
3891         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3892         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3893
3894         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3895             leftFPR, rightFPR, scratchGPR, scratchFPR);
3896         gen.generateFastPath(m_jit);
3897
3898         ASSERT(gen.didEmitFastPath());
3899         gen.endJumpList().append(m_jit.jump());
3900