DFG backends shouldn't emit type checks at KnownBlah edges
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
82 {
83     IndexingType indexingType = structure->indexingType();
84     bool hasIndexingHeader = hasIndexedProperties(indexingType);
85
86     unsigned inlineCapacity = structure->inlineCapacity();
87     unsigned outOfLineCapacity = structure->outOfLineCapacity();
88     
89     GPRTemporary scratch(this);
90     GPRTemporary scratch2(this);
91     GPRReg scratchGPR = scratch.gpr();
92     GPRReg scratch2GPR = scratch2.gpr();
93
94     ASSERT(vectorLength >= numElements);
95     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
96     
97     JITCompiler::JumpList slowCases;
98
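    // A rough sketch of the storage being sized here: indexed (butterfly) storage only
    // exists when the structure has indexed properties, and it is preceded by an
    // IndexingHeader; out-of-line named properties account for the rest. For example
    // (64-bit, sizeof(JSValue) == 8), vectorLength = 4 and outOfLineCapacity = 2 give
    // 4 * 8 + 8 + 2 * 8 = 56 bytes.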
99     size_t size = 0;
100     if (hasIndexingHeader)
101         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102     size += outOfLineCapacity * sizeof(JSValue);
103
104     if (size) {
105         slowCases.append(
106             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
107         if (hasIndexingHeader)
108             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
109         else
110             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
111     } else
112         m_jit.move(TrustedImmPtr(0), storageGPR);
113
114     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
118
119     if (hasIndexingHeader)
120         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121
122     // I want a slow path that also loads out the storage pointer, and that's
123     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124     // of work for a very small piece of functionality. :-/
125     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127         structure, vectorLength));
128
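    // Unused slots of a double array must hold PNaN, the representation JSC uses for
    // holes in ArrayWithDouble storage, so pre-fill the slots in [numElements, vectorLength).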
129     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
130 #if USE(JSVALUE64)
131         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132         for (unsigned i = numElements; i < vectorLength; ++i)
133             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
134 #else
135         EncodedValueDescriptor value;
136         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137         for (unsigned i = numElements; i < vectorLength; ++i) {
138             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
140         }
141 #endif
142     }
143     
144     if (hasIndexingHeader)
145         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
146 }
147
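// When the inline call frame is known and is not a varargs frame, the argument count is
// a compile-time constant; otherwise it has to be loaded from the frame's ArgumentCount
// slot. includeThis controls whether the implicit |this| argument is counted.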
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
149 {
150     if (inlineCallFrame && !inlineCallFrame->isVarargs())
151         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
152     else {
153         VirtualRegister argumentCountRegister;
154         if (!inlineCallFrame)
155             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
156         else
157             argumentCountRegister = inlineCallFrame->argumentCountRegister;
158         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
159         if (!includeThis)
160             m_jit.sub32(TrustedImm32(1), lengthGPR);
161     }
162 }
163
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
165 {
166     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
167 }
168
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
170 {
171     if (origin.inlineCallFrame) {
172         if (origin.inlineCallFrame->isClosureCall) {
173             m_jit.loadPtr(
174                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
175                 calleeGPR);
176         } else {
177             m_jit.move(
178                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
179                 calleeGPR);
180         }
181     } else
182         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
183 }
184
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
186 {
187     m_jit.addPtr(
188         TrustedImm32(
189             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190         GPRInfo::callFrameRegister, startGPR);
191 }
192
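// OSR exit fuzzing: every potential exit site bumps a global counter, and once the
// counter reaches the configured threshold the exit is forced. Roughly this logic,
// emitted as JIT code (sketch only; takeOSRExit stands in for the jump we hand back):
//
//     unsigned n = ++g_numberOfOSRExitFuzzChecks;
//     bool fire = atOrAfter ? (n >= atOrAfter) : (n == at);
//     if (fire)
//         takeOSRExit();
//
// When fuzzing is disabled this returns an unset Jump, so callers must test isSet()
// before appending it to their exit jump lists.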
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
194 {
195     if (!doOSRExitFuzzing())
196         return MacroAssembler::Jump();
197     
198     MacroAssembler::Jump result;
199     
200     m_jit.pushToSave(GPRInfo::regT0);
201     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205     unsigned at = Options::fireOSRExitFuzzAt();
206     if (at || atOrAfter) {
207         unsigned threshold;
208         MacroAssembler::RelationalCondition condition;
209         if (atOrAfter) {
210             threshold = atOrAfter;
211             condition = MacroAssembler::Below;
212         } else {
213             threshold = at;
214             condition = MacroAssembler::NotEqual;
215         }
216         MacroAssembler::Jump ok = m_jit.branch32(
217             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218         m_jit.popToRestore(GPRInfo::regT0);
219         result = m_jit.jump();
220         ok.link(&m_jit);
221     }
222     m_jit.popToRestore(GPRInfo::regT0);
223     
224     return result;
225 }
226
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
228 {
229     if (!m_compileOkay)
230         return;
231     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232     if (fuzzJump.isSet()) {
233         JITCompiler::JumpList jumpsToFail;
234         jumpsToFail.append(fuzzJump);
235         jumpsToFail.append(jumpToFail);
236         m_jit.appendExitInfo(jumpsToFail);
237     } else
238         m_jit.appendExitInfo(jumpToFail);
239     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
243 {
244     if (!m_compileOkay)
245         return;
246     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247     if (fuzzJump.isSet()) {
248         JITCompiler::JumpList myJumpsToFail;
249         myJumpsToFail.append(jumpsToFail);
250         myJumpsToFail.append(fuzzJump);
251         m_jit.appendExitInfo(myJumpsToFail);
252     } else
253         m_jit.appendExitInfo(jumpsToFail);
254     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
255 }
256
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
258 {
259     if (!m_compileOkay)
260         return OSRExitJumpPlaceholder();
261     unsigned index = m_jit.jitCode()->osrExit.size();
262     m_jit.appendExitInfo();
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264     return OSRExitJumpPlaceholder(index);
265 }
266
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
268 {
269     return speculationCheck(kind, jsValueSource, nodeUse.node());
270 }
271
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
273 {
274     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
275 }
276
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
278 {
279     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
280 }
281
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
283 {
284     if (!m_compileOkay)
285         return;
286     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287     m_jit.appendExitInfo(jumpToFail);
288     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
294 }
295
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
297 {
298     if (!m_compileOkay)
299         return;
300     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301     m_jit.jitCode()->appendOSRExit(OSRExit(
302         UncountableInvalidation, JSValueSource(),
303         m_jit.graph().methodOfGettingAValueProfileFor(node),
304         this, m_stream->size()));
305     info.m_replacementSource = m_jit.watchpointLabel();
306     ASSERT(info.m_replacementSource.isSet());
307     noResult(node);
308 }
309
310 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
311 {
312     if (!m_compileOkay)
313         return;
314     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
315     m_compileOkay = false;
316     if (verboseCompilationEnabled())
317         dataLog("Bailing compilation.\n");
318 }
319
320 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
321 {
322     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
323 }
324
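// typeCheck pairs the dynamic check with a static update: the abstract interpreter's
// state for the edge is filtered down to typesPassedThrough, so downstream code can rely
// on the proven type, while jumpToFail becomes an OSR exit if the runtime value fails
// the check. Edges whose type is already proven must never reach here; the assertion
// below enforces that needsTypeCheck() holds.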
325 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
326 {
327     ASSERT(needsTypeCheck(edge, typesPassedThrough));
328     m_interpreter.filter(edge, typesPassedThrough);
329     speculationCheck(exitKind, source, edge.node(), jumpToFail);
330 }
331
332 RegisterSet SpeculativeJIT::usedRegisters()
333 {
334     RegisterSet result;
335     
336     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
337         GPRReg gpr = GPRInfo::toRegister(i);
338         if (m_gprs.isInUse(gpr))
339             result.set(gpr);
340     }
341     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
342         FPRReg fpr = FPRInfo::toRegister(i);
343         if (m_fprs.isInUse(fpr))
344             result.set(fpr);
345     }
346     
347     result.merge(RegisterSet::stubUnavailableRegisters());
348     
349     return result;
350 }
351
352 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
353 {
354     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
355 }
356
357 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
358 {
359     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) {
360         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic);
361         m_slowPathGenerators[i]->generate(this);
362     }
363 }
364
365 // On Windows we need to wrap fmod; on other platforms we can call it directly.
366 // On ARMv7 we assert that all function pointers have the low bit set (i.e., they point to Thumb code).
367 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
368 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
369 {
370     return fmod(x, y);
371 }
372 #else
373 #define fmodAsDFGOperation fmod
374 #endif
375
376 void SpeculativeJIT::clearGenerationInfo()
377 {
378     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
379         m_generationInfo[i] = GenerationInfo();
380     m_gprs = RegisterBank<GPRInfo>();
381     m_fprs = RegisterBank<FPRInfo>();
382 }
383
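// Silent spill/fill plans describe how to preserve a live register across a call without
// disturbing the register allocator's bookkeeping: the spill action stores the value when
// a spill is actually needed, and the fill action reloads or rematerializes it afterwards;
// both are chosen from the value's current DataFormat (constants, for instance, are simply
// re-materialized rather than stored and reloaded).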
384 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
385 {
386     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
387     Node* node = info.node();
388     DataFormat registerFormat = info.registerFormat();
389     ASSERT(registerFormat != DataFormatNone);
390     ASSERT(registerFormat != DataFormatDouble);
391         
392     SilentSpillAction spillAction;
393     SilentFillAction fillAction;
394         
395     if (!info.needsSpill())
396         spillAction = DoNothingForSpill;
397     else {
398 #if USE(JSVALUE64)
399         ASSERT(info.gpr() == source);
400         if (registerFormat == DataFormatInt32)
401             spillAction = Store32Payload;
402         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
403             spillAction = StorePtr;
404         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
405             spillAction = Store64;
406         else {
407             ASSERT(registerFormat & DataFormatJS);
408             spillAction = Store64;
409         }
410 #elif USE(JSVALUE32_64)
411         if (registerFormat & DataFormatJS) {
412             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
413             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
414         } else {
415             ASSERT(info.gpr() == source);
416             spillAction = Store32Payload;
417         }
418 #endif
419     }
420         
421     if (registerFormat == DataFormatInt32) {
422         ASSERT(info.gpr() == source);
423         ASSERT(isJSInt32(info.registerFormat()));
424         if (node->hasConstant()) {
425             ASSERT(node->isInt32Constant());
426             fillAction = SetInt32Constant;
427         } else
428             fillAction = Load32Payload;
429     } else if (registerFormat == DataFormatBoolean) {
430 #if USE(JSVALUE64)
431         RELEASE_ASSERT_NOT_REACHED();
432 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
433         fillAction = DoNothingForFill;
434 #endif
435 #elif USE(JSVALUE32_64)
436         ASSERT(info.gpr() == source);
437         if (node->hasConstant()) {
438             ASSERT(node->isBooleanConstant());
439             fillAction = SetBooleanConstant;
440         } else
441             fillAction = Load32Payload;
442 #endif
443     } else if (registerFormat == DataFormatCell) {
444         ASSERT(info.gpr() == source);
445         if (node->hasConstant()) {
446             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
447             node->asCell(); // To get the assertion.
448             fillAction = SetCellConstant;
449         } else {
450 #if USE(JSVALUE64)
451             fillAction = LoadPtr;
452 #else
453             fillAction = Load32Payload;
454 #endif
455         }
456     } else if (registerFormat == DataFormatStorage) {
457         ASSERT(info.gpr() == source);
458         fillAction = LoadPtr;
459     } else if (registerFormat == DataFormatInt52) {
460         if (node->hasConstant())
461             fillAction = SetInt52Constant;
462         else if (info.spillFormat() == DataFormatInt52)
463             fillAction = Load64;
464         else if (info.spillFormat() == DataFormatStrictInt52)
465             fillAction = Load64ShiftInt52Left;
466         else if (info.spillFormat() == DataFormatNone)
467             fillAction = Load64;
468         else {
469             RELEASE_ASSERT_NOT_REACHED();
470 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
471             fillAction = Load64; // Make GCC happy.
472 #endif
473         }
474     } else if (registerFormat == DataFormatStrictInt52) {
475         if (node->hasConstant())
476             fillAction = SetStrictInt52Constant;
477         else if (info.spillFormat() == DataFormatInt52)
478             fillAction = Load64ShiftInt52Right;
479         else if (info.spillFormat() == DataFormatStrictInt52)
480             fillAction = Load64;
481         else if (info.spillFormat() == DataFormatNone)
482             fillAction = Load64;
483         else {
484             RELEASE_ASSERT_NOT_REACHED();
485 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
486             fillAction = Load64; // Make GCC happy.
487 #endif
488         }
489     } else {
490         ASSERT(registerFormat & DataFormatJS);
491 #if USE(JSVALUE64)
492         ASSERT(info.gpr() == source);
493         if (node->hasConstant()) {
494             if (node->isCellConstant())
495                 fillAction = SetTrustedJSConstant;
496             else
497                 fillAction = SetJSConstant;
498         } else if (info.spillFormat() == DataFormatInt32) {
499             ASSERT(registerFormat == DataFormatJSInt32);
500             fillAction = Load32PayloadBoxInt;
501         } else
502             fillAction = Load64;
503 #else
504         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
505         if (node->hasConstant())
506             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
507         else if (info.payloadGPR() == source)
508             fillAction = Load32Payload;
509         else { // Fill the Tag
510             switch (info.spillFormat()) {
511             case DataFormatInt32:
512                 ASSERT(registerFormat == DataFormatJSInt32);
513                 fillAction = SetInt32Tag;
514                 break;
515             case DataFormatCell:
516                 ASSERT(registerFormat == DataFormatJSCell);
517                 fillAction = SetCellTag;
518                 break;
519             case DataFormatBoolean:
520                 ASSERT(registerFormat == DataFormatJSBoolean);
521                 fillAction = SetBooleanTag;
522                 break;
523             default:
524                 fillAction = Load32Tag;
525                 break;
526             }
527         }
528 #endif
529     }
530         
531     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
532 }
533     
534 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
535 {
536     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
537     Node* node = info.node();
538     ASSERT(info.registerFormat() == DataFormatDouble);
539
540     SilentSpillAction spillAction;
541     SilentFillAction fillAction;
542         
543     if (!info.needsSpill())
544         spillAction = DoNothingForSpill;
545     else {
546         ASSERT(!node->hasConstant());
547         ASSERT(info.spillFormat() == DataFormatNone);
548         ASSERT(info.fpr() == source);
549         spillAction = StoreDouble;
550     }
551         
552 #if USE(JSVALUE64)
553     if (node->hasConstant()) {
554         node->asNumber(); // To get the assertion.
555         fillAction = SetDoubleConstant;
556     } else {
557         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
558         fillAction = LoadDouble;
559     }
560 #elif USE(JSVALUE32_64)
561     ASSERT(info.registerFormat() == DataFormatDouble);
562     if (node->hasConstant()) {
563         node->asNumber(); // To get the assertion.
564         fillAction = SetDoubleConstant;
565     } else
566         fillAction = LoadDouble;
567 #endif
568
569     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
570 }
571     
572 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
573 {
574     switch (plan.spillAction()) {
575     case DoNothingForSpill:
576         break;
577     case Store32Tag:
578         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
579         break;
580     case Store32Payload:
581         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
582         break;
583     case StorePtr:
584         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
585         break;
586 #if USE(JSVALUE64)
587     case Store64:
588         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
589         break;
590 #endif
591     case StoreDouble:
592         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
593         break;
594     default:
595         RELEASE_ASSERT_NOT_REACHED();
596     }
597 }
598     
599 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
600 {
601 #if USE(JSVALUE32_64)
602     UNUSED_PARAM(canTrample);
603 #endif
604     switch (plan.fillAction()) {
605     case DoNothingForFill:
606         break;
607     case SetInt32Constant:
608         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
609         break;
610 #if USE(JSVALUE64)
611     case SetInt52Constant:
612         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
613         break;
614     case SetStrictInt52Constant:
615         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
616         break;
617 #endif // USE(JSVALUE64)
618     case SetBooleanConstant:
619         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
620         break;
621     case SetCellConstant:
622         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
623         break;
624 #if USE(JSVALUE64)
625     case SetTrustedJSConstant:
626         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
627         break;
628     case SetJSConstant:
629         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
630         break;
631     case SetDoubleConstant:
632         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
633         m_jit.move64ToDouble(canTrample, plan.fpr());
634         break;
635     case Load32PayloadBoxInt:
636         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
637         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
638         break;
639     case Load32PayloadConvertToInt52:
640         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
641         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
642         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
643         break;
644     case Load32PayloadSignExtend:
645         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
646         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
647         break;
648 #else
649     case SetJSConstantTag:
650         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
651         break;
652     case SetJSConstantPayload:
653         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
654         break;
655     case SetInt32Tag:
656         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
657         break;
658     case SetCellTag:
659         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
660         break;
661     case SetBooleanTag:
662         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
663         break;
664     case SetDoubleConstant:
665         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
666         break;
667 #endif
668     case Load32Tag:
669         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
670         break;
671     case Load32Payload:
672         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673         break;
674     case LoadPtr:
675         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
676         break;
677 #if USE(JSVALUE64)
678     case Load64:
679         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
680         break;
681     case Load64ShiftInt52Right:
682         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
683         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
684         break;
685     case Load64ShiftInt52Left:
686         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
687         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
688         break;
689 #endif
690     case LoadDouble:
691         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
692         break;
693     default:
694         RELEASE_ASSERT_NOT_REACHED();
695     }
696 }
697     
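// Given the object's indexing-type byte in tempGPR, emit a branch that is taken when the
// indexing type does not match the expected shape. For example, Array::Array with
// Int32Shape masks with (IsArray | IndexingShapeMask) and compares against
// (IsArray | Int32Shape); the NonArray cases additionally require the IsArray bit to be
// clear, and PossiblyArray ignores it.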
698 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
699 {
700     switch (arrayMode.arrayClass()) {
701     case Array::OriginalArray: {
702         CRASH();
703 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
704         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
705         return result;
706 #endif
707     }
708         
709     case Array::Array:
710         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
711         return m_jit.branch32(
712             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
713         
714     case Array::NonArray:
715     case Array::OriginalNonArray:
716         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
717         return m_jit.branch32(
718             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
719         
720     case Array::PossiblyArray:
721         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
722         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
723     }
724     
725     RELEASE_ASSERT_NOT_REACHED();
726     return JITCompiler::Jump();
727 }
728
729 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
730 {
731     JITCompiler::JumpList result;
732     
733     switch (arrayMode.type()) {
734     case Array::Int32:
735         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
736
737     case Array::Double:
738         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
739
740     case Array::Contiguous:
741         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
742
743     case Array::Undecided:
744         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
745
746     case Array::ArrayStorage:
747     case Array::SlowPutArrayStorage: {
748         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
749         
750         if (arrayMode.isJSArray()) {
751             if (arrayMode.isSlowPut()) {
752                 result.append(
753                     m_jit.branchTest32(
754                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
755                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
756                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
757                 result.append(
758                     m_jit.branch32(
759                         MacroAssembler::Above, tempGPR,
760                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
761                 break;
762             }
763             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
764             result.append(
765                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
766             break;
767         }
768         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
769         if (arrayMode.isSlowPut()) {
770             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
771             result.append(
772                 m_jit.branch32(
773                     MacroAssembler::Above, tempGPR,
774                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
775             break;
776         }
777         result.append(
778             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
779         break;
780     }
781     default:
782         CRASH();
783         break;
784     }
785     
786     return result;
787 }
788
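// CheckArray: if the abstract interpreter has already proven the required array mode for
// the base, this is a no-op. Otherwise, for the ordinary indexing shapes we load the
// indexing-type byte and OSR-exit via jumpSlowForUnwantedArrayMode; DirectArguments,
// ScopedArguments and typed arrays are checked by JSType instead.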
789 void SpeculativeJIT::checkArray(Node* node)
790 {
791     ASSERT(node->arrayMode().isSpecific());
792     ASSERT(!node->arrayMode().doesConversion());
793     
794     SpeculateCellOperand base(this, node->child1());
795     GPRReg baseReg = base.gpr();
796     
797     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
798         noResult(m_currentNode);
799         return;
800     }
801     
802     const ClassInfo* expectedClassInfo = 0;
803     
804     switch (node->arrayMode().type()) {
805     case Array::AnyTypedArray:
806     case Array::String:
807         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
808         break;
809     case Array::Int32:
810     case Array::Double:
811     case Array::Contiguous:
812     case Array::Undecided:
813     case Array::ArrayStorage:
814     case Array::SlowPutArrayStorage: {
815         GPRTemporary temp(this);
816         GPRReg tempGPR = temp.gpr();
817         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
818         speculationCheck(
819             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
820             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
821         
822         noResult(m_currentNode);
823         return;
824     }
825     case Array::DirectArguments:
826         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
827         noResult(m_currentNode);
828         return;
829     case Array::ScopedArguments:
830         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
831         noResult(m_currentNode);
832         return;
833     default:
834         speculateCellTypeWithoutTypeFiltering(
835             node->child1(), baseReg,
836             typeForTypedArrayType(node->arrayMode().typedArrayType()));
837         noResult(m_currentNode);
838         return;
839     }
840     
841     RELEASE_ASSERT(expectedClassInfo);
842     
843     GPRTemporary temp(this);
844     GPRTemporary temp2(this);
845     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
846     speculationCheck(
847         BadType, JSValueSource::unboxedCell(baseReg), node,
848         m_jit.branchPtr(
849             MacroAssembler::NotEqual,
850             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
851             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
852     
853     noResult(m_currentNode);
854 }
855
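// Arrayify converts the base object's storage to the indexing type required by the array
// mode. ArrayifyToStructure can simply compare the structure ID against the expected
// structure; other modes re-check the indexing-type byte. Either way, a mismatch falls
// into ArrayifySlowPathGenerator, which performs the actual conversion.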
856 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
857 {
858     ASSERT(node->arrayMode().doesConversion());
859     
860     GPRTemporary temp(this);
861     GPRTemporary structure;
862     GPRReg tempGPR = temp.gpr();
863     GPRReg structureGPR = InvalidGPRReg;
864     
865     if (node->op() != ArrayifyToStructure) {
866         GPRTemporary realStructure(this);
867         structure.adopt(realStructure);
868         structureGPR = structure.gpr();
869     }
870         
871     // We can skip all that comes next if we already have array storage.
872     MacroAssembler::JumpList slowPath;
873     
874     if (node->op() == ArrayifyToStructure) {
875         slowPath.append(m_jit.branchWeakStructure(
876             JITCompiler::NotEqual,
877             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
878             node->structure()));
879     } else {
880         m_jit.load8(
881             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
882         
883         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
884     }
885     
886     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
887         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
888     
889     noResult(m_currentNode);
890 }
891
892 void SpeculativeJIT::arrayify(Node* node)
893 {
894     ASSERT(node->arrayMode().isSpecific());
895     
896     SpeculateCellOperand base(this, node->child1());
897     
898     if (!node->child2()) {
899         arrayify(node, base.gpr(), InvalidGPRReg);
900         return;
901     }
902     
903     SpeculateInt32Operand property(this, node->child2());
904     
905     arrayify(node, base.gpr(), property.gpr());
906 }
907
908 GPRReg SpeculativeJIT::fillStorage(Edge edge)
909 {
910     VirtualRegister virtualRegister = edge->virtualRegister();
911     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
912     
913     switch (info.registerFormat()) {
914     case DataFormatNone: {
915         if (info.spillFormat() == DataFormatStorage) {
916             GPRReg gpr = allocate();
917             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
918             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
919             info.fillStorage(*m_stream, gpr);
920             return gpr;
921         }
922         
923         // Must be a cell; fill it as a cell and then return the pointer.
924         return fillSpeculateCell(edge);
925     }
926         
927     case DataFormatStorage: {
928         GPRReg gpr = info.gpr();
929         m_gprs.lock(gpr);
930         return gpr;
931     }
932         
933     default:
934         return fillSpeculateCell(edge);
935     }
936 }
937
938 void SpeculativeJIT::useChildren(Node* node)
939 {
940     if (node->flags() & NodeHasVarArgs) {
941         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
942             if (!!m_jit.graph().m_varArgChildren[childIdx])
943                 use(m_jit.graph().m_varArgChildren[childIdx]);
944         }
945     } else {
946         Edge child1 = node->child1();
947         if (!child1) {
948             ASSERT(!node->child2() && !node->child3());
949             return;
950         }
951         use(child1);
952         
953         Edge child2 = node->child2();
954         if (!child2) {
955             ASSERT(!node->child3());
956             return;
957         }
958         use(child2);
959         
960         Edge child3 = node->child3();
961         if (!child3)
962             return;
963         use(child3);
964     }
965 }
966
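// try_get_by_id: the CellUse path can assume the base is a cell, while the UntypedUse
// path branches around the inline cache for non-cells and lets the slow path handle
// them. Both use AccessType::GetPure, the variant that is expected not to trigger
// observable side effects such as getters.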
967 void SpeculativeJIT::compileTryGetById(Node* node)
968 {
969     switch (node->child1().useKind()) {
970     case CellUse: {
971         SpeculateCellOperand base(this, node->child1());
972         JSValueRegsTemporary result(this, Reuse, base);
973
974         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
975         JSValueRegs resultRegs = result.regs();
976
977         base.use();
978
979         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
980
981         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
982         break;
983     }
984
985     case UntypedUse: {
986         JSValueOperand base(this, node->child1());
987         JSValueRegsTemporary result(this, Reuse, base);
988
989         JSValueRegs baseRegs = base.jsValueRegs();
990         JSValueRegs resultRegs = result.regs();
991
992         base.use();
993
994         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
995
996         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
997
998         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
999         break;
1000     }
1001
1002     default:
1003         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1004         break;
1005     } 
1006 }
1007
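// "in" with a constant atomic-string key gets an inline cache: we plant a patchable jump
// and a StructureStubInfo so operationInOptimize can later repatch this site into a fast
// structure check. Any other key falls back to operationGenericIn.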
1008 void SpeculativeJIT::compileIn(Node* node)
1009 {
1010     SpeculateCellOperand base(this, node->child2());
1011     GPRReg baseGPR = base.gpr();
1012     
1013     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1014         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1015             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1016             
1017             GPRTemporary result(this);
1018             GPRReg resultGPR = result.gpr();
1019
1020             use(node->child1());
1021             
1022             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1023             MacroAssembler::Label done = m_jit.label();
1024             
1025             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1026             // we can cast it to const AtomicStringImpl* safely.
1027             auto slowPath = slowPathCall(
1028                 jump.m_jump, this, operationInOptimize,
1029                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1030                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1031             
1032             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1033             stubInfo->codeOrigin = node->origin.semantic;
1034             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1035             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1036 #if USE(JSVALUE32_64)
1037             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1038             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1039 #endif
1040             stubInfo->patch.usedRegisters = usedRegisters();
1041
1042             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1043             addSlowPathGenerator(WTFMove(slowPath));
1044
1045             base.use();
1046
1047             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1048             return;
1049         }
1050     }
1051
1052     JSValueOperand key(this, node->child1());
1053     JSValueRegs regs = key.jsValueRegs();
1054         
1055     GPRFlushedCallResult result(this);
1056     GPRReg resultGPR = result.gpr();
1057         
1058     base.use();
1059     key.use();
1060         
1061     flushRegisters();
1062     callOperation(
1063         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1064         baseGPR, regs);
1065     m_jit.exceptionCheck();
1066     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1067 }
1068
1069 void SpeculativeJIT::compileDeleteById(Node* node)
1070 {
1071     JSValueOperand value(this, node->child1());
1072     GPRFlushedCallResult result(this);
1073
1074     JSValueRegs valueRegs = value.jsValueRegs();
1075     GPRReg resultGPR = result.gpr();
1076
1077     value.use();
1078
1079     flushRegisters();
1080     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1081     m_jit.exceptionCheck();
1082
1083     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1084 }
1085
1086 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1087 {
1088     unsigned branchIndexInBlock = detectPeepHoleBranch();
1089     if (branchIndexInBlock != UINT_MAX) {
1090         Node* branchNode = m_block->at(branchIndexInBlock);
1091
1092         ASSERT(node->adjustedRefCount() == 1);
1093         
1094         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1095     
1096         m_indexInBlock = branchIndexInBlock;
1097         m_currentNode = branchNode;
1098         
1099         return true;
1100     }
1101     
1102     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1103     
1104     return false;
1105 }
1106
1107 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1108 {
1109     unsigned branchIndexInBlock = detectPeepHoleBranch();
1110     if (branchIndexInBlock != UINT_MAX) {
1111         Node* branchNode = m_block->at(branchIndexInBlock);
1112
1113         ASSERT(node->adjustedRefCount() == 1);
1114         
1115         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1116     
1117         m_indexInBlock = branchIndexInBlock;
1118         m_currentNode = branchNode;
1119         
1120         return true;
1121     }
1122     
1123     nonSpeculativeNonPeepholeStrictEq(node, invert);
1124     
1125     return false;
1126 }
1127
1128 static const char* dataFormatString(DataFormat format)
1129 {
1130     // These values correspond to the DataFormat enum.
1131     const char* strings[] = {
1132         "[  ]",
1133         "[ i]",
1134         "[ d]",
1135         "[ c]",
1136         "Err!",
1137         "Err!",
1138         "Err!",
1139         "Err!",
1140         "[J ]",
1141         "[Ji]",
1142         "[Jd]",
1143         "[Jc]",
1144         "Err!",
1145         "Err!",
1146         "Err!",
1147         "Err!",
1148     };
1149     return strings[format];
1150 }
1151
1152 void SpeculativeJIT::dump(const char* label)
1153 {
1154     if (label)
1155         dataLogF("<%s>\n", label);
1156
1157     dataLogF("  gprs:\n");
1158     m_gprs.dump();
1159     dataLogF("  fprs:\n");
1160     m_fprs.dump();
1161     dataLogF("  VirtualRegisters:\n");
1162     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1163         GenerationInfo& info = m_generationInfo[i];
1164         if (info.alive())
1165             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1166         else
1167             dataLogF("    % 3d:[__][__]", i);
1168         if (info.registerFormat() == DataFormatDouble)
1169             dataLogF(":fpr%d\n", info.fpr());
1170         else if (info.registerFormat() != DataFormatNone
1171 #if USE(JSVALUE32_64)
1172             && !(info.registerFormat() & DataFormatJS)
1173 #endif
1174             ) {
1175             ASSERT(info.gpr() != InvalidGPRReg);
1176             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1177         } else
1178             dataLogF("\n");
1179     }
1180     if (label)
1181         dataLogF("</%s>\n", label);
1182 }
1183
1184 GPRTemporary::GPRTemporary()
1185     : m_jit(0)
1186     , m_gpr(InvalidGPRReg)
1187 {
1188 }
1189
1190 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1191     : m_jit(jit)
1192     , m_gpr(InvalidGPRReg)
1193 {
1194     m_gpr = m_jit->allocate();
1195 }
1196
1197 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1198     : m_jit(jit)
1199     , m_gpr(InvalidGPRReg)
1200 {
1201     m_gpr = m_jit->allocate(specific);
1202 }
1203
1204 #if USE(JSVALUE32_64)
1205 GPRTemporary::GPRTemporary(
1206     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1207     : m_jit(jit)
1208     , m_gpr(InvalidGPRReg)
1209 {
1210     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1211         m_gpr = m_jit->reuse(op1.gpr(which));
1212     else
1213         m_gpr = m_jit->allocate();
1214 }
1215 #endif // USE(JSVALUE32_64)
1216
1217 JSValueRegsTemporary::JSValueRegsTemporary() { }
1218
1219 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1220 #if USE(JSVALUE64)
1221     : m_gpr(jit)
1222 #else
1223     : m_payloadGPR(jit)
1224     , m_tagGPR(jit)
1225 #endif
1226 {
1227 }
1228
1229 #if USE(JSVALUE64)
1230 template<typename T>
1231 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1232     : m_gpr(jit, Reuse, operand)
1233 {
1234 }
1235 #else
1236 template<typename T>
1237 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1238 {
1239     if (resultWord == PayloadWord) {
1240         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1241         m_tagGPR = GPRTemporary(jit);
1242     } else {
1243         m_payloadGPR = GPRTemporary(jit);
1244         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1245     }
1246 }
1247 #endif
1248
1249 #if USE(JSVALUE64)
1250 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1251 {
1252     m_gpr = GPRTemporary(jit, Reuse, operand);
1253 }
1254 #else
1255 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1256 {
1257     if (jit->canReuse(operand.node())) {
1258         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1259         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1260     } else {
1261         m_payloadGPR = GPRTemporary(jit);
1262         m_tagGPR = GPRTemporary(jit);
1263     }
1264 }
1265 #endif
1266
1267 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1268
1269 JSValueRegs JSValueRegsTemporary::regs()
1270 {
1271 #if USE(JSVALUE64)
1272     return JSValueRegs(m_gpr.gpr());
1273 #else
1274     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1275 #endif
1276 }
1277
1278 void GPRTemporary::adopt(GPRTemporary& other)
1279 {
1280     ASSERT(!m_jit);
1281     ASSERT(m_gpr == InvalidGPRReg);
1282     ASSERT(other.m_jit);
1283     ASSERT(other.m_gpr != InvalidGPRReg);
1284     m_jit = other.m_jit;
1285     m_gpr = other.m_gpr;
1286     other.m_jit = 0;
1287     other.m_gpr = InvalidGPRReg;
1288 }
1289
1290 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1291     : m_jit(jit)
1292     , m_fpr(InvalidFPRReg)
1293 {
1294     m_fpr = m_jit->fprAllocate();
1295 }
1296
1297 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1298     : m_jit(jit)
1299     , m_fpr(InvalidFPRReg)
1300 {
1301     if (m_jit->canReuse(op1.node()))
1302         m_fpr = m_jit->reuse(op1.fpr());
1303     else
1304         m_fpr = m_jit->fprAllocate();
1305 }
1306
1307 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1308     : m_jit(jit)
1309     , m_fpr(InvalidFPRReg)
1310 {
1311     if (m_jit->canReuse(op1.node()))
1312         m_fpr = m_jit->reuse(op1.fpr());
1313     else if (m_jit->canReuse(op2.node()))
1314         m_fpr = m_jit->reuse(op2.fpr());
1315     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1316         m_fpr = m_jit->reuse(op1.fpr());
1317     else
1318         m_fpr = m_jit->fprAllocate();
1319 }
1320
1321 #if USE(JSVALUE32_64)
1322 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1323     : m_jit(jit)
1324     , m_fpr(InvalidFPRReg)
1325 {
1326     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1327         m_fpr = m_jit->reuse(op1.fpr());
1328     else
1329         m_fpr = m_jit->fprAllocate();
1330 }
1331 #endif
1332
1333 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1334 {
1335     BasicBlock* taken = branchNode->branchData()->taken.block;
1336     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1337
1338     if (taken == nextBlock()) {
1339         condition = MacroAssembler::invert(condition);
1340         std::swap(taken, notTaken);
1341     }
1342
1343     SpeculateDoubleOperand op1(this, node->child1());
1344     SpeculateDoubleOperand op2(this, node->child2());
1345     
1346     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1347     jump(notTaken);
1348 }
1349
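// Peephole object equality: both operands are speculated to be objects. When the
// masquerades-as-undefined watchpoint is intact, the object checks are enough; otherwise
// we must also OSR-exit if either operand has the MasqueradesAsUndefined type-info flag,
// since such objects can compare loosely equal to undefined/null, which a plain pointer
// comparison would not capture.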
1350 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1351 {
1352     BasicBlock* taken = branchNode->branchData()->taken.block;
1353     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1354
1355     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1356     
1357     if (taken == nextBlock()) {
1358         condition = MacroAssembler::NotEqual;
1359         BasicBlock* tmp = taken;
1360         taken = notTaken;
1361         notTaken = tmp;
1362     }
1363
1364     SpeculateCellOperand op1(this, node->child1());
1365     SpeculateCellOperand op2(this, node->child2());
1366     
1367     GPRReg op1GPR = op1.gpr();
1368     GPRReg op2GPR = op2.gpr();
1369     
1370     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1371         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1372             speculationCheck(
1373                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1374         }
1375         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1376             speculationCheck(
1377                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1378         }
1379     } else {
1380         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1381             speculationCheck(
1382                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1383                 m_jit.branchIfNotObject(op1GPR));
1384         }
1385         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1386             m_jit.branchTest8(
1387                 MacroAssembler::NonZero, 
1388                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1389                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1390
1391         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1392             speculationCheck(
1393                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1394                 m_jit.branchIfNotObject(op2GPR));
1395         }
1396         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1397             m_jit.branchTest8(
1398                 MacroAssembler::NonZero, 
1399                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1400                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1401     }
1402
1403     branchPtr(condition, op1GPR, op2GPR, taken);
1404     jump(notTaken);
1405 }
1406
1407 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1408 {
1409     BasicBlock* taken = branchNode->branchData()->taken.block;
1410     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1411
1412     // The branch instruction will branch to the taken block.
1413     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1414     if (taken == nextBlock()) {
1415         condition = JITCompiler::invert(condition);
1416         BasicBlock* tmp = taken;
1417         taken = notTaken;
1418         notTaken = tmp;
1419     }
1420
1421     if (node->child1()->isInt32Constant()) {
1422         int32_t imm = node->child1()->asInt32();
1423         SpeculateBooleanOperand op2(this, node->child2());
1424         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1425     } else if (node->child2()->isInt32Constant()) {
1426         SpeculateBooleanOperand op1(this, node->child1());
1427         int32_t imm = node->child2()->asInt32();
1428         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1429     } else {
1430         SpeculateBooleanOperand op1(this, node->child1());
1431         SpeculateBooleanOperand op2(this, node->child2());
1432         branch32(condition, op1.gpr(), op2.gpr(), taken);
1433     }
1434
1435     jump(notTaken);
1436 }
1437
1438 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1439 {
1440     BasicBlock* taken = branchNode->branchData()->taken.block;
1441     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1442
1443     // The branch instruction will branch to the taken block.
1444     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1445     if (taken == nextBlock()) {
1446         condition = JITCompiler::invert(condition);
1447         BasicBlock* tmp = taken;
1448         taken = notTaken;
1449         notTaken = tmp;
1450     }
1451
1452     if (node->child1()->isInt32Constant()) {
1453         int32_t imm = node->child1()->asInt32();
1454         SpeculateInt32Operand op2(this, node->child2());
1455         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1456     } else if (node->child2()->isInt32Constant()) {
1457         SpeculateInt32Operand op1(this, node->child1());
1458         int32_t imm = node->child2()->asInt32();
1459         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1460     } else {
1461         SpeculateInt32Operand op1(this, node->child1());
1462         SpeculateInt32Operand op2(this, node->child2());
1463         branch32(condition, op1.gpr(), op2.gpr(), taken);
1464     }
1465
1466     jump(notTaken);
1467 }
1468
1469 // Returns true if the compare is fused with a subsequent branch.
1470 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1471 {
1472     // Fused compare & branch.
1473     unsigned branchIndexInBlock = detectPeepHoleBranch();
1474     if (branchIndexInBlock != UINT_MAX) {
1475         Node* branchNode = m_block->at(branchIndexInBlock);
1476
1477         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1478         // so there can be no intervening nodes that also reference the compare.
1479         ASSERT(node->adjustedRefCount() == 1);
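        // Fusing the compare into the branch means no boolean is ever materialized:
        // rather than writing 0/1 into a register and re-testing it for the Branch
        // node, the comparison's condition feeds the conditional jump directly.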
1480
1481         if (node->isBinaryUseKind(Int32Use))
1482             compilePeepHoleInt32Branch(node, branchNode, condition);
1483 #if USE(JSVALUE64)
1484         else if (node->isBinaryUseKind(Int52RepUse))
1485             compilePeepHoleInt52Branch(node, branchNode, condition);
1486 #endif // USE(JSVALUE64)
1487         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1488             // Use non-peephole comparison, for now.
1489             return false;
1490         } else if (node->isBinaryUseKind(DoubleRepUse))
1491             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1492         else if (node->op() == CompareEq) {
1493             if (node->isBinaryUseKind(BooleanUse))
1494                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1495             else if (node->isBinaryUseKind(SymbolUse))
1496                 compilePeepHoleSymbolEquality(node, branchNode);
1497             else if (node->isBinaryUseKind(ObjectUse))
1498                 compilePeepHoleObjectEquality(node, branchNode);
1499             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1500                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1501             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1502                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1503             else if (!needsTypeCheck(node->child1(), SpecOther))
1504                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1505             else if (!needsTypeCheck(node->child2(), SpecOther))
1506                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1507             else {
1508                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1509                 return true;
1510             }
1511         } else {
1512             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1513             return true;
1514         }
1515
1516         use(node->child1());
1517         use(node->child2());
1518         m_indexInBlock = branchIndexInBlock;
1519         m_currentNode = branchNode;
1520         return true;
1521     }
1522     return false;
1523 }
1524
1525 void SpeculativeJIT::noticeOSRBirth(Node* node)
1526 {
1527     if (!node->hasVirtualRegister())
1528         return;
1529     
1530     VirtualRegister virtualRegister = node->virtualRegister();
1531     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1532     
1533     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1534 }
1535
1536 void SpeculativeJIT::compileMovHint(Node* node)
1537 {
1538     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1539     
1540     Node* child = node->child1().node();
1541     noticeOSRBirth(child);
1542     
1543     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1544 }
1545
1546 void SpeculativeJIT::bail(AbortReason reason)
1547 {
1548     if (verboseCompilationEnabled())
1549         dataLog("Bailing compilation.\n");
1550     m_compileOkay = true;
1551     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1552     clearGenerationInfo();
1553 }
1554
1555 void SpeculativeJIT::compileCurrentBlock()
1556 {
1557     ASSERT(m_compileOkay);
1558     
1559     if (!m_block)
1560         return;
1561     
1562     ASSERT(m_block->isReachable);
1563     
1564     m_jit.blockHeads()[m_block->index] = m_jit.label();
1565
1566     if (!m_block->intersectionOfCFAHasVisited) {
1567         // Don't generate code for basic blocks that are unreachable according to CFA.
1568         // But to be sure that nobody has generated a jump to this block, drop in a
1569         // breakpoint here.
1570         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1571         return;
1572     }
1573
1574     m_stream->appendAndLog(VariableEvent::reset());
1575     
1576     m_jit.jitAssertHasValidCallFrame();
1577     m_jit.jitAssertTagsInPlace();
1578     m_jit.jitAssertArgumentCountSane();
1579
1580     m_state.reset();
1581     m_state.beginBasicBlock(m_block);
1582     
1583     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1584         int operand = m_block->variablesAtHead.operandForIndex(i);
1585         Node* node = m_block->variablesAtHead[i];
1586         if (!node)
1587             continue; // No need to record dead SetLocals.
1588         
1589         VariableAccessData* variable = node->variableAccessData();
1590         DataFormat format;
1591         if (!node->refCount())
1592             continue; // No need to record dead SetLocals.
1593         format = dataFormatFor(variable->flushFormat());
1594         m_stream->appendAndLog(
1595             VariableEvent::setLocal(
1596                 VirtualRegister(operand),
1597                 variable->machineLocal(),
1598                 format));
1599     }
1600
1601     m_origin = NodeOrigin();
1602     
1603     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1604         m_currentNode = m_block->at(m_indexInBlock);
1605         
1606         // We may have hit a contradiction that the CFA was aware of but that the JIT
1607         // didn't cause directly.
1608         if (!m_state.isValid()) {
1609             bail(DFGBailedAtTopOfBlock);
1610             return;
1611         }
1612
1613         m_interpreter.startExecuting();
1614         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1615         m_jit.setForNode(m_currentNode);
1616         m_origin = m_currentNode->origin;
1617         if (validationEnabled())
1618             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1619         m_lastGeneratedNode = m_currentNode->op();
1620         
1621         ASSERT(m_currentNode->shouldGenerate());
1622         
1623         if (verboseCompilationEnabled()) {
1624             dataLogF(
1625                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1626                 (int)m_currentNode->index(),
1627                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1628             dataLog("\n");
1629         }
1630
1631         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1632             m_jit.jitReleaseAssertNoException();
1633
1634         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1635
1636         compile(m_currentNode);
1637         
1638         if (belongsInMinifiedGraph(m_currentNode->op()))
1639             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1640         
1641 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1642         m_jit.clearRegisterAllocationOffsets();
1643 #endif
1644         
1645         if (!m_compileOkay) {
1646             bail(DFGBailedAtEndOfNode);
1647             return;
1648         }
1649         
1650         // Make sure that the abstract state is rematerialized for the next node.
1651         m_interpreter.executeEffects(m_indexInBlock);
1652     }
1653     
1654     // Perform the most basic verification that children have been used correctly.
1655     if (!ASSERT_DISABLED) {
1656         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1657             GenerationInfo& info = m_generationInfo[index];
1658             RELEASE_ASSERT(!info.alive());
1659         }
1660     }
1661 }
1662
1663 // If we are making type predictions about our arguments then
1664 // we need to check that they are correct on function entry.
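// Roughly, for each argument whose flush format was narrowed by prediction, the code
// below inspects the argument's stack slot and emits a speculationCheck that OSR-exits
// if the tag does not match; e.g. on 64-bit a FlushedInt32 argument costs one branch64
// of the slot against the tagTypeNumber register.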
1665 void SpeculativeJIT::checkArgumentTypes()
1666 {
1667     ASSERT(!m_currentNode);
1668     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1669
1670     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1671         Node* node = m_jit.graph().m_arguments[i];
1672         if (!node) {
1673             // The argument is dead. We don't do any checks for such arguments.
1674             continue;
1675         }
1676         
1677         ASSERT(node->op() == SetArgument);
1678         ASSERT(node->shouldGenerate());
1679
1680         VariableAccessData* variableAccessData = node->variableAccessData();
1681         FlushFormat format = variableAccessData->flushFormat();
1682         
1683         if (format == FlushedJSValue)
1684             continue;
1685         
1686         VirtualRegister virtualRegister = variableAccessData->local();
1687
1688         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1689         
1690 #if USE(JSVALUE64)
1691         switch (format) {
1692         case FlushedInt32: {
1693             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1694             break;
1695         }
1696         case FlushedBoolean: {
1697             GPRTemporary temp(this);
1698             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
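            // Booleans box as ValueFalse and ValueFalse | 1, so xoring with ValueFalse
            // leaves at most the low bit set for a genuine boolean; the branchTest64
            // against ~1 below flags anything else as a BadType.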
1699             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1700             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1701             break;
1702         }
1703         case FlushedCell: {
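            // A cell is a raw pointer with none of the tag bits set, so any bit under
            // the tag mask register means the slot does not hold a cell.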
1704             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1705             break;
1706         }
1707         default:
1708             RELEASE_ASSERT_NOT_REACHED();
1709             break;
1710         }
1711 #else
1712         switch (format) {
1713         case FlushedInt32: {
1714             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1715             break;
1716         }
1717         case FlushedBoolean: {
1718             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1719             break;
1720         }
1721         case FlushedCell: {
1722             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1723             break;
1724         }
1725         default:
1726             RELEASE_ASSERT_NOT_REACHED();
1727             break;
1728         }
1729 #endif
1730     }
1731
1732     m_origin = NodeOrigin();
1733 }
1734
1735 bool SpeculativeJIT::compile()
1736 {
1737     checkArgumentTypes();
1738     
1739     ASSERT(!m_currentNode);
1740     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1741         m_jit.setForBlockIndex(blockIndex);
1742         m_block = m_jit.graph().block(blockIndex);
1743         compileCurrentBlock();
1744     }
1745     linkBranches();
1746     return true;
1747 }
1748
1749 void SpeculativeJIT::createOSREntries()
1750 {
1751     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1752         BasicBlock* block = m_jit.graph().block(blockIndex);
1753         if (!block)
1754             continue;
1755         if (!block->isOSRTarget)
1756             continue;
1757         
1758         // Currently we don't have OSR entry trampolines. We could add them
1759         // here if need be.
1760         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1761     }
1762 }
1763
1764 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1765 {
1766     unsigned osrEntryIndex = 0;
1767     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1768         BasicBlock* block = m_jit.graph().block(blockIndex);
1769         if (!block)
1770             continue;
1771         if (!block->isOSRTarget)
1772             continue;
1773         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1774     }
1775     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1776     
1777     if (verboseCompilationEnabled()) {
1778         DumpContext dumpContext;
1779         dataLog("OSR Entries:\n");
1780         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1781             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1782         if (!dumpContext.isEmpty())
1783             dumpContext.dump(WTF::dataFile());
1784     }
1785 }
1786
1787 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1788 {
1789     Edge child3 = m_jit.graph().varArgChild(node, 2);
1790     Edge child4 = m_jit.graph().varArgChild(node, 3);
1791
1792     ArrayMode arrayMode = node->arrayMode();
1793     
1794     GPRReg baseReg = base.gpr();
1795     GPRReg propertyReg = property.gpr();
1796     
1797     SpeculateDoubleOperand value(this, child3);
1798
1799     FPRReg valueReg = value.fpr();
1800     
1801     DFG_TYPE_CHECK(
1802         JSValueRegs(), child3, SpecFullRealNumber,
1803         m_jit.branchDouble(
1804             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1805     
1806     if (!m_compileOkay)
1807         return;
1808     
1809     StorageOperand storage(this, child4);
1810     GPRReg storageReg = storage.gpr();
1811
1812     if (node->op() == PutByValAlias) {
1813         // Store the value to the array.
1814         GPRReg propertyReg = property.gpr();
1815         FPRReg valueReg = value.fpr();
1816         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1817         
1818         noResult(m_currentNode);
1819         return;
1820     }
1821     
1822     GPRTemporary temporary;
1823     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1824
1825     MacroAssembler::Jump slowCase;
1826     
1827     if (arrayMode.isInBounds()) {
1828         speculationCheck(
1829             OutOfBounds, JSValueRegs(), 0,
1830             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1831     } else {
1832         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1833         
1834         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1835         
1836         if (!arrayMode.isOutOfBounds())
1837             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1838         
1839         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1840         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1841         
1842         inBounds.link(&m_jit);
1843     }
1844     
1845     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1846
1847     base.use();
1848     property.use();
1849     value.use();
1850     storage.use();
1851     
1852     if (arrayMode.isOutOfBounds()) {
1853         addSlowPathGenerator(
1854             slowPathCall(
1855                 slowCase, this,
1856                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1857                 NoResult, baseReg, propertyReg, valueReg));
1858     }
1859
1860     noResult(m_currentNode, UseChildrenCalledExplicitly);
1861 }
1862
1863 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1864 {
1865     SpeculateCellOperand string(this, node->child1());
1866     SpeculateStrictInt32Operand index(this, node->child2());
1867     StorageOperand storage(this, node->child3());
1868
1869     GPRReg stringReg = string.gpr();
1870     GPRReg indexReg = index.gpr();
1871     GPRReg storageReg = storage.gpr();
1872     
1873     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1874
1875     // unsigned comparison so we can filter out negative indices and indices that are too large
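    // e.g. an index of -1 reads as 0xffffffff unsigned, which is AboveOrEqual any
    // possible string length, so one unsigned branch rejects both failure modes.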
1876     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1877
1878     GPRTemporary scratch(this);
1879     GPRReg scratchReg = scratch.gpr();
1880
1881     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1882
1883     // Load the character into scratchReg
1884     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1885
1886     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1887     JITCompiler::Jump cont8Bit = m_jit.jump();
1888
1889     is16Bit.link(&m_jit);
1890
1891     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1892
1893     cont8Bit.link(&m_jit);
1894
1895     int32Result(scratchReg, m_currentNode);
1896 }
1897
1898 void SpeculativeJIT::compileGetByValOnString(Node* node)
1899 {
1900     SpeculateCellOperand base(this, node->child1());
1901     SpeculateStrictInt32Operand property(this, node->child2());
1902     StorageOperand storage(this, node->child3());
1903     GPRReg baseReg = base.gpr();
1904     GPRReg propertyReg = property.gpr();
1905     GPRReg storageReg = storage.gpr();
1906
1907     GPRTemporary scratch(this);
1908     GPRReg scratchReg = scratch.gpr();
1909 #if USE(JSVALUE32_64)
1910     GPRTemporary resultTag;
1911     GPRReg resultTagReg = InvalidGPRReg;
1912     if (node->arrayMode().isOutOfBounds()) {
1913         GPRTemporary realResultTag(this);
1914         resultTag.adopt(realResultTag);
1915         resultTagReg = resultTag.gpr();
1916     }
1917 #endif
1918
1919     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1920
1921     // unsigned comparison so we can filter out negative indices and indices that are too large
1922     JITCompiler::Jump outOfBounds = m_jit.branch32(
1923         MacroAssembler::AboveOrEqual, propertyReg,
1924         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1925     if (node->arrayMode().isInBounds())
1926         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1927
1928     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1929
1930     // Load the character into scratchReg
1931     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1932
1933     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1934     JITCompiler::Jump cont8Bit = m_jit.jump();
1935
1936     is16Bit.link(&m_jit);
1937
1938     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1939
1940     JITCompiler::Jump bigCharacter =
1941         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1942
1943     // 8 bit string values don't need the isASCII check.
1944     cont8Bit.link(&m_jit);
1945
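        // Index into the VM's single-character string cache: scale the character code
        // to a pointer-sized slot (shift by 2 on 32-bit, 3 on 64-bit), add the table
        // base, and load the cached JSString*.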
1946     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1947     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1948     m_jit.loadPtr(scratchReg, scratchReg);
1949
1950     addSlowPathGenerator(
1951         slowPathCall(
1952             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1953
1954     if (node->arrayMode().isOutOfBounds()) {
1955 #if USE(JSVALUE32_64)
1956         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1957 #endif
1958
1959         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1960         bool prototypeChainIsSane = false;
1961         if (globalObject->stringPrototypeChainIsSane()) {
1962             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1963             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1964             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1965             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1966             // indexed properties either.
1967             // https://bugs.webkit.org/show_bug.cgi?id=144668
1968             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1969             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1970             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
1971         }
1972         if (prototypeChainIsSane) {
1973             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1974             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1975             
1976 #if USE(JSVALUE64)
1977             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1978                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1979 #else
1980             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1981                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1982                 baseReg, propertyReg));
1983 #endif
1984         } else {
1985 #if USE(JSVALUE64)
1986             addSlowPathGenerator(
1987                 slowPathCall(
1988                     outOfBounds, this, operationGetByValStringInt,
1989                     scratchReg, baseReg, propertyReg));
1990 #else
1991             addSlowPathGenerator(
1992                 slowPathCall(
1993                     outOfBounds, this, operationGetByValStringInt,
1994                     resultTagReg, scratchReg, baseReg, propertyReg));
1995 #endif
1996         }
1997         
1998 #if USE(JSVALUE64)
1999         jsValueResult(scratchReg, m_currentNode);
2000 #else
2001         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2002 #endif
2003     } else
2004         cellResult(scratchReg, m_currentNode);
2005 }
2006
2007 void SpeculativeJIT::compileFromCharCode(Node* node)
2008 {
2009     Edge& child = node->child1();
2010     if (child.useKind() == UntypedUse) {
2011         JSValueOperand opr(this, child);
2012         JSValueRegs oprRegs = opr.jsValueRegs();
2013 #if USE(JSVALUE64)
2014         GPRTemporary result(this);
2015         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2016 #else
2017         GPRTemporary resultTag(this);
2018         GPRTemporary resultPayload(this);
2019         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2020 #endif
2021         flushRegisters();
2022         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2023         m_jit.exceptionCheck();
2024         
2025         jsValueResult(resultRegs, node);
2026         return;
2027     }
2028
2029     SpeculateStrictInt32Operand property(this, child);
2030     GPRReg propertyReg = property.gpr();
2031     GPRTemporary smallStrings(this);
2032     GPRTemporary scratch(this);
2033     GPRReg scratchReg = scratch.gpr();
2034     GPRReg smallStringsReg = smallStrings.gpr();
2035
2036     JITCompiler::JumpList slowCases;
2037     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2038     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2039     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2040
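    // The single-character string cache is filled lazily, so a null entry falls
    // through to the slow path, which creates and caches the string.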
2041     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2042     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2043     cellResult(scratchReg, m_currentNode);
2044 }
2045
2046 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2047 {
2048     VirtualRegister virtualRegister = node->virtualRegister();
2049     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2050
2051     switch (info.registerFormat()) {
2052     case DataFormatStorage:
2053         RELEASE_ASSERT_NOT_REACHED();
2054
2055     case DataFormatBoolean:
2056     case DataFormatCell:
2057         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2058         return GeneratedOperandTypeUnknown;
2059
2060     case DataFormatNone:
2061     case DataFormatJSCell:
2062     case DataFormatJS:
2063     case DataFormatJSBoolean:
2064     case DataFormatJSDouble:
2065         return GeneratedOperandJSValue;
2066
2067     case DataFormatJSInt32:
2068     case DataFormatInt32:
2069         return GeneratedOperandInteger;
2070
2071     default:
2072         RELEASE_ASSERT_NOT_REACHED();
2073         return GeneratedOperandTypeUnknown;
2074     }
2075 }
2076
2077 void SpeculativeJIT::compileValueToInt32(Node* node)
2078 {
2079     switch (node->child1().useKind()) {
2080 #if USE(JSVALUE64)
2081     case Int52RepUse: {
2082         SpeculateStrictInt52Operand op1(this, node->child1());
2083         GPRTemporary result(this, Reuse, op1);
2084         GPRReg op1GPR = op1.gpr();
2085         GPRReg resultGPR = result.gpr();
2086         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2087         int32Result(resultGPR, node, DataFormatInt32);
2088         return;
2089     }
2090 #endif // USE(JSVALUE64)
2091         
2092     case DoubleRepUse: {
2093         GPRTemporary result(this);
2094         SpeculateDoubleOperand op1(this, node->child1());
2095         FPRReg fpr = op1.fpr();
2096         GPRReg gpr = result.gpr();
2097         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2098         
2099         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2100         
2101         int32Result(gpr, node);
2102         return;
2103     }
2104     
2105     case NumberUse:
2106     case NotCellUse: {
2107         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2108         case GeneratedOperandInteger: {
2109             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2110             GPRTemporary result(this, Reuse, op1);
2111             m_jit.move(op1.gpr(), result.gpr());
2112             int32Result(result.gpr(), node, op1.format());
2113             return;
2114         }
2115         case GeneratedOperandJSValue: {
2116             GPRTemporary result(this);
2117 #if USE(JSVALUE64)
2118             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2119
2120             GPRReg gpr = op1.gpr();
2121             GPRReg resultGpr = result.gpr();
2122             FPRTemporary tempFpr(this);
2123             FPRReg fpr = tempFpr.fpr();
2124
2125             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2126             JITCompiler::JumpList converted;
2127
2128             if (node->child1().useKind() == NumberUse) {
2129                 DFG_TYPE_CHECK(
2130                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2131                     m_jit.branchTest64(
2132                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2133             } else {
2134                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2135                 
2136                 DFG_TYPE_CHECK(
2137                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2138                 
2139                 // It's not a cell: so true turns into 1 and all else turns into 0.
2140                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2141                 converted.append(m_jit.jump());
2142                 
2143                 isNumber.link(&m_jit);
2144             }
2145
2146             // First, if we get here we have a double encoded as a JSValue
2147             unboxDouble(gpr, resultGpr, fpr);
2148
2149             silentSpillAllRegisters(resultGpr);
2150             callOperation(toInt32, resultGpr, fpr);
2151             silentFillAllRegisters(resultGpr);
2152
2153             converted.append(m_jit.jump());
2154
2155             isInteger.link(&m_jit);
2156             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2157
2158             converted.link(&m_jit);
2159 #else
2160             Node* childNode = node->child1().node();
2161             VirtualRegister virtualRegister = childNode->virtualRegister();
2162             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2163
2164             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2165
2166             GPRReg payloadGPR = op1.payloadGPR();
2167             GPRReg resultGpr = result.gpr();
2168         
2169             JITCompiler::JumpList converted;
2170
2171             if (info.registerFormat() == DataFormatJSInt32)
2172                 m_jit.move(payloadGPR, resultGpr);
2173             else {
2174                 GPRReg tagGPR = op1.tagGPR();
2175                 FPRTemporary tempFpr(this);
2176                 FPRReg fpr = tempFpr.fpr();
2177                 FPRTemporary scratch(this);
2178
2179                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2180
2181                 if (node->child1().useKind() == NumberUse) {
2182                     DFG_TYPE_CHECK(
2183                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2184                         m_jit.branch32(
2185                             MacroAssembler::AboveOrEqual, tagGPR,
2186                             TrustedImm32(JSValue::LowestTag)));
2187                 } else {
2188                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2189                     
2190                     DFG_TYPE_CHECK(
2191                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2192                         m_jit.branchIfCell(op1.jsValueRegs()));
2193                     
2194                     // It's not a cell: so true turns into 1 and all else turns into 0.
2195                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2196                     m_jit.move(TrustedImm32(0), resultGpr);
2197                     converted.append(m_jit.jump());
2198                     
2199                     isBoolean.link(&m_jit);
2200                     m_jit.move(payloadGPR, resultGpr);
2201                     converted.append(m_jit.jump());
2202                     
2203                     isNumber.link(&m_jit);
2204                 }
2205
2206                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2207
2208                 silentSpillAllRegisters(resultGpr);
2209                 callOperation(toInt32, resultGpr, fpr);
2210                 silentFillAllRegisters(resultGpr);
2211
2212                 converted.append(m_jit.jump());
2213
2214                 isInteger.link(&m_jit);
2215                 m_jit.move(payloadGPR, resultGpr);
2216
2217                 converted.link(&m_jit);
2218             }
2219 #endif
2220             int32Result(resultGpr, node);
2221             return;
2222         }
2223         case GeneratedOperandTypeUnknown:
2224             RELEASE_ASSERT(!m_compileOkay);
2225             return;
2226         }
2227         RELEASE_ASSERT_NOT_REACHED();
2228         return;
2229     }
2230     
2231     default:
2232         ASSERT(!m_compileOkay);
2233         return;
2234     }
2235 }
2236
2237 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2238 {
2239     if (doesOverflow(node->arithMode())) {
2240         if (enableInt52()) {
2241             SpeculateInt32Operand op1(this, node->child1());
2242             GPRTemporary result(this, Reuse, op1);
2243             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2244             strictInt52Result(result.gpr(), node);
2245             return;
2246         }
2247         SpeculateInt32Operand op1(this, node->child1());
2248         FPRTemporary result(this);
2249             
2250         GPRReg inputGPR = op1.gpr();
2251         FPRReg outputFPR = result.fpr();
2252             
2253         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2254             
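        // convertInt32ToDouble treats the input as signed; if the uint32 had its sign
        // bit set the result is 2^32 too small, so add twoToThe32 back in that case.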
2255         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2256         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2257         positive.link(&m_jit);
2258             
2259         doubleResult(outputFPR, node);
2260         return;
2261     }
2262     
2263     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2264
2265     SpeculateInt32Operand op1(this, node->child1());
2266     GPRTemporary result(this);
2267
2268     m_jit.move(op1.gpr(), result.gpr());
2269
2270     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2271
2272     int32Result(result.gpr(), node, op1.format());
2273 }
2274
2275 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2276 {
2277     SpeculateDoubleOperand op1(this, node->child1());
2278     FPRTemporary scratch(this);
2279     GPRTemporary result(this);
2280     
2281     FPRReg valueFPR = op1.fpr();
2282     FPRReg scratchFPR = scratch.fpr();
2283     GPRReg resultGPR = result.gpr();
2284
2285     JITCompiler::JumpList failureCases;
2286     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2287     m_jit.branchConvertDoubleToInt32(
2288         valueFPR, resultGPR, failureCases, scratchFPR,
2289         shouldCheckNegativeZero(node->arithMode()));
2290     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2291
2292     int32Result(resultGPR, node);
2293 }
2294
2295 void SpeculativeJIT::compileDoubleRep(Node* node)
2296 {
2297     switch (node->child1().useKind()) {
2298     case RealNumberUse: {
2299         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2300         FPRTemporary result(this);
2301         
2302         JSValueRegs op1Regs = op1.jsValueRegs();
2303         FPRReg resultFPR = result.fpr();
2304         
2305 #if USE(JSVALUE64)
2306         GPRTemporary temp(this);
2307         GPRReg tempGPR = temp.gpr();
2308         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2309 #else
2310         FPRTemporary temp(this);
2311         FPRReg tempFPR = temp.fpr();
2312         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2313 #endif
2314         
2315         JITCompiler::Jump done = m_jit.branchDouble(
2316             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2317         
2318         DFG_TYPE_CHECK(
2319             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2320         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2321         
2322         done.link(&m_jit);
2323         
2324         doubleResult(resultFPR, node);
2325         return;
2326     }
2327     
2328     case NotCellUse:
2329     case NumberUse: {
2330         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2331
2332         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2333         if (isInt32Speculation(possibleTypes)) {
2334             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2335             FPRTemporary result(this);
2336             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2337             doubleResult(result.fpr(), node);
2338             return;
2339         }
2340
2341         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2342         FPRTemporary result(this);
2343
2344 #if USE(JSVALUE64)
2345         GPRTemporary temp(this);
2346
2347         GPRReg op1GPR = op1.gpr();
2348         GPRReg tempGPR = temp.gpr();
2349         FPRReg resultFPR = result.fpr();
2350         JITCompiler::JumpList done;
2351
2352         JITCompiler::Jump isInteger = m_jit.branch64(
2353             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2354
2355         if (node->child1().useKind() == NotCellUse) {
2356             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2357             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2358
2359             static const double zero = 0;
2360             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2361
2362             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2363             done.append(isNull);
2364
2365             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2366                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2367
2368             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2369             static const double one = 1;
2370             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2371             done.append(m_jit.jump());
2372             done.append(isFalse);
2373
2374             isUndefined.link(&m_jit);
2375             static const double NaN = PNaN;
2376             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2377             done.append(m_jit.jump());
2378
2379             isNumber.link(&m_jit);
2380         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2381             typeCheck(
2382                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2383                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2384         }
2385
2386         unboxDouble(op1GPR, tempGPR, resultFPR);
2387         done.append(m_jit.jump());
2388     
2389         isInteger.link(&m_jit);
2390         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2391         done.link(&m_jit);
2392 #else // USE(JSVALUE64) -> this is the 32_64 case
2393         FPRTemporary temp(this);
2394     
2395         GPRReg op1TagGPR = op1.tagGPR();
2396         GPRReg op1PayloadGPR = op1.payloadGPR();
2397         FPRReg tempFPR = temp.fpr();
2398         FPRReg resultFPR = result.fpr();
2399         JITCompiler::JumpList done;
2400     
2401         JITCompiler::Jump isInteger = m_jit.branch32(
2402             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2403
2404         if (node->child1().useKind() == NotCellUse) {
2405             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2406             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2407
2408             static const double zero = 0;
2409             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2410
2411             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2412             done.append(isNull);
2413
2414             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2415
2416             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2417             static const double one = 1;
2418             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2419             done.append(m_jit.jump());
2420             done.append(isFalse);
2421
2422             isUndefined.link(&m_jit);
2423             static const double NaN = PNaN;
2424             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2425             done.append(m_jit.jump());
2426
2427             isNumber.link(&m_jit);
2428         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2429             typeCheck(
2430                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2431                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2432         }
2433
2434         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2435         done.append(m_jit.jump());
2436     
2437         isInteger.link(&m_jit);
2438         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2439         done.link(&m_jit);
2440 #endif // USE(JSVALUE64)
2441     
2442         doubleResult(resultFPR, node);
2443         return;
2444     }
2445         
2446 #if USE(JSVALUE64)
2447     case Int52RepUse: {
2448         SpeculateStrictInt52Operand value(this, node->child1());
2449         FPRTemporary result(this);
2450         
2451         GPRReg valueGPR = value.gpr();
2452         FPRReg resultFPR = result.fpr();
2453
2454         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2455         
2456         doubleResult(resultFPR, node);
2457         return;
2458     }
2459 #endif // USE(JSVALUE64)
2460         
2461     default:
2462         RELEASE_ASSERT_NOT_REACHED();
2463         return;
2464     }
2465 }
2466
2467 void SpeculativeJIT::compileValueRep(Node* node)
2468 {
2469     switch (node->child1().useKind()) {
2470     case DoubleRepUse: {
2471         SpeculateDoubleOperand value(this, node->child1());
2472         JSValueRegsTemporary result(this);
2473         
2474         FPRReg valueFPR = value.fpr();
2475         JSValueRegs resultRegs = result.regs();
2476         
2477         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2478         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2479         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2480         // local was purified.
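        // Purifying rewrites any NaN bit pattern to the canonical quiet NaN before
        // boxing, since an arbitrary NaN payload could otherwise alias a non-double
        // encoding once boxed.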
2481         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2482             m_jit.purifyNaN(valueFPR);
2483
2484         boxDouble(valueFPR, resultRegs);
2485         
2486         jsValueResult(resultRegs, node);
2487         return;
2488     }
2489         
2490 #if USE(JSVALUE64)
2491     case Int52RepUse: {
2492         SpeculateStrictInt52Operand value(this, node->child1());
2493         GPRTemporary result(this);
2494         
2495         GPRReg valueGPR = value.gpr();
2496         GPRReg resultGPR = result.gpr();
2497         
2498         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2499         
2500         jsValueResult(resultGPR, node);
2501         return;
2502     }
2503 #endif // USE(JSVALUE64)
2504         
2505     default:
2506         RELEASE_ASSERT_NOT_REACHED();
2507         return;
2508     }
2509 }
2510
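// Clamps to [0, 255] with 0.5 pre-added, so the caller's later truncation to int gives
// round-half-up behavior; because every comparison involving NaN is false, a NaN input
// takes the !(d > 0) arm and clamps to 0.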
2511 static double clampDoubleToByte(double d)
2512 {
2513     d += 0.5;
2514     if (!(d > 0))
2515         d = 0;
2516     else if (d > 255)
2517         d = 255;
2518     return d;
2519 }
2520
2521 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2522 {
2523     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2524     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
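    // The first check is unsigned, so 0..255 jumps straight to inBounds; a negative
    // value looks huge unsigned, fails it, and also fails the signed GreaterThan, so
    // it reaches the xor below and clamps to zero.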
2525     jit.xorPtr(result, result);
2526     MacroAssembler::Jump clamped = jit.jump();
2527     tooBig.link(&jit);
2528     jit.move(JITCompiler::TrustedImm32(255), result);
2529     clamped.link(&jit);
2530     inBounds.link(&jit);
2531 }
2532
2533 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2534 {
2535     // Unordered compare so we pick up NaN
2536     static const double zero = 0;
2537     static const double byteMax = 255;
2538     static const double half = 0.5;
2539     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2540     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2541     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2542     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2543     
2544     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2545     // FIXME: This should probably just use a floating point round!
2546     // https://bugs.webkit.org/show_bug.cgi?id=72054
2547     jit.addDouble(source, scratch);
2548     jit.truncateDoubleToInt32(scratch, result);   
2549     MacroAssembler::Jump truncatedInt = jit.jump();
2550     
2551     tooSmall.link(&jit);
2552     jit.xorPtr(result, result);
2553     MacroAssembler::Jump zeroed = jit.jump();
2554     
2555     tooBig.link(&jit);
2556     jit.move(JITCompiler::TrustedImm32(255), result);
2557     
2558     truncatedInt.link(&jit);
2559     zeroed.link(&jit);
2560
2561 }
2562
2563 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2564 {
2565     if (node->op() == PutByValAlias)
2566         return JITCompiler::Jump();
2567     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2568         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
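    // If the base is a compile-time-known view, its length is a constant: a constant
    // index already proven in bounds needs no branch at all (an unset Jump), and
    // otherwise the index is compared against the immediate length rather than a load
    // from the view.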
2569     if (view) {
2570         uint32_t length = view->length();
2571         Node* indexNode = m_jit.graph().child(node, 1).node();
2572         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2573             return JITCompiler::Jump();
2574         return m_jit.branch32(
2575             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2576     }
2577     return m_jit.branch32(
2578         MacroAssembler::AboveOrEqual, indexGPR,
2579         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2580 }
2581
2582 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2583 {
2584     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2585     if (!jump.isSet())
2586         return;
2587     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2588 }
2589
2590 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2591 {
2592     ASSERT(isInt(type));
2593     
2594     SpeculateCellOperand base(this, node->child1());
2595     SpeculateStrictInt32Operand property(this, node->child2());
2596     StorageOperand storage(this, node->child3());
2597
2598     GPRReg baseReg = base.gpr();
2599     GPRReg propertyReg = property.gpr();
2600     GPRReg storageReg = storage.gpr();
2601
2602     GPRTemporary result(this);
2603     GPRReg resultReg = result.gpr();
2604
2605     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2606
2607     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2608     switch (elementSize(type)) {
2609     case 1:
2610         if (isSigned(type))
2611             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2612         else
2613             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2614         break;
2615     case 2:
2616         if (isSigned(type))
2617             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2618         else
2619             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2620         break;
2621     case 4:
2622         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2623         break;
2624     default:
2625         CRASH();
2626     }
2627     if (elementSize(type) < 4 || isSigned(type)) {
2628         int32Result(resultReg, node);
2629         return;
2630     }
2631     
2632     ASSERT(elementSize(type) == 4 && !isSigned(type));
2633     if (node->shouldSpeculateInt32()) {
2634         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2635         int32Result(resultReg, node);
2636         return;
2637     }
2638     
2639 #if USE(JSVALUE64)
2640     if (node->shouldSpeculateAnyInt()) {
2641         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2642         strictInt52Result(resultReg, node);
2643         return;
2644     }
2645 #endif
2646     
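    // Unsigned 32-bit element widened to double: same fix-up as compileUInt32ToNumber,
    // adding 2^32 when the loaded value has its sign bit set.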
2647     FPRTemporary fresult(this);
2648     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2649     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2650     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2651     positive.link(&m_jit);
2652     doubleResult(fresult.fpr(), node);
2653 }
2654
2655 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2656 {
2657     ASSERT(isInt(type));
2658     
2659     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2660     GPRReg storageReg = storage.gpr();
2661     
2662     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2663     
2664     GPRTemporary value;
2665     GPRReg valueGPR = InvalidGPRReg;
2666     
2667     if (valueUse->isConstant()) {
2668         JSValue jsValue = valueUse->asJSValue();
2669         if (!jsValue.isNumber()) {
2670             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2671             noResult(node);
2672             return;
2673         }
2674         double d = jsValue.asNumber();
2675         if (isClamped(type)) {
2676             ASSERT(elementSize(type) == 1);
2677             d = clampDoubleToByte(d);
2678         }
2679         GPRTemporary scratch(this);
2680         GPRReg scratchReg = scratch.gpr();
2681         m_jit.move(Imm32(toInt32(d)), scratchReg);
2682         value.adopt(scratch);
2683         valueGPR = scratchReg;
2684     } else {
2685         switch (valueUse.useKind()) {
2686         case Int32Use: {
2687             SpeculateInt32Operand valueOp(this, valueUse);
2688             GPRTemporary scratch(this);
2689             GPRReg scratchReg = scratch.gpr();
2690             m_jit.move(valueOp.gpr(), scratchReg);
2691             if (isClamped(type)) {
2692                 ASSERT(elementSize(type) == 1);
2693                 compileClampIntegerToByte(m_jit, scratchReg);
2694             }
2695             value.adopt(scratch);
2696             valueGPR = scratchReg;
2697             break;
2698         }
2699             
2700 #if USE(JSVALUE64)
2701         case Int52RepUse: {
2702             SpeculateStrictInt52Operand valueOp(this, valueUse);
2703             GPRTemporary scratch(this);
2704             GPRReg scratchReg = scratch.gpr();
2705             m_jit.move(valueOp.gpr(), scratchReg);
2706             if (isClamped(type)) {
2707                 ASSERT(elementSize(type) == 1);
2708                 MacroAssembler::Jump inBounds = m_jit.branch64(
2709                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2710                 MacroAssembler::Jump tooBig = m_jit.branch64(
2711                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2712                 m_jit.move(TrustedImm32(0), scratchReg);
2713                 MacroAssembler::Jump clamped = m_jit.jump();
2714                 tooBig.link(&m_jit);
2715                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2716                 clamped.link(&m_jit);
2717                 inBounds.link(&m_jit);
2718             }
2719             value.adopt(scratch);
2720             valueGPR = scratchReg;
2721             break;
2722         }
2723 #endif // USE(JSVALUE64)
2724             
2725         case DoubleRepUse: {
2726             if (isClamped(type)) {
2727                 ASSERT(elementSize(type) == 1);
2728                 SpeculateDoubleOperand valueOp(this, valueUse);
2729                 GPRTemporary result(this);
2730                 FPRTemporary floatScratch(this);
2731                 FPRReg fpr = valueOp.fpr();
2732                 GPRReg gpr = result.gpr();
2733                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2734                 value.adopt(result);
2735                 valueGPR = gpr;
2736             } else {
2737                 SpeculateDoubleOperand valueOp(this, valueUse);
2738                 GPRTemporary result(this);
2739                 FPRReg fpr = valueOp.fpr();
2740                 GPRReg gpr = result.gpr();
2741                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2742                 m_jit.xorPtr(gpr, gpr);
2743                 MacroAssembler::Jump fixed = m_jit.jump();
2744                 notNaN.link(&m_jit);
2745                 
2746                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2747                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2748                 
2749                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2750                 
2751                 fixed.link(&m_jit);
2752                 value.adopt(result);
2753                 valueGPR = gpr;
2754             }
2755             break;
2756         }
2757             
2758         default:
2759             RELEASE_ASSERT_NOT_REACHED();
2760             break;
2761         }
2762     }
2763     
2764     ASSERT_UNUSED(valueGPR, valueGPR != property);
2765     ASSERT(valueGPR != base);
2766     ASSERT(valueGPR != storageReg);
2767     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2768     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2769         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2770         outOfBounds = MacroAssembler::Jump();
2771     }
2772
2773     switch (elementSize(type)) {
2774     case 1:
2775         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2776         break;
2777     case 2:
2778         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2779         break;
2780     case 4:
2781         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2782         break;
2783     default:
2784         CRASH();
2785     }
2786     if (outOfBounds.isSet())
2787         outOfBounds.link(&m_jit);
2788     noResult(node);
2789 }
2790
2791 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2792 {
2793     ASSERT(isFloat(type));
2794     
2795     SpeculateCellOperand base(this, node->child1());
2796     SpeculateStrictInt32Operand property(this, node->child2());
2797     StorageOperand storage(this, node->child3());
2798
2799     GPRReg baseReg = base.gpr();
2800     GPRReg propertyReg = property.gpr();
2801     GPRReg storageReg = storage.gpr();
2802
2803     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2804
2805     FPRTemporary result(this);
2806     FPRReg resultReg = result.fpr();
2807     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
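     // Float32 elements are loaded and widened to double; Float64 elements are loaded directly.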
2808     switch (elementSize(type)) {
2809     case 4:
2810         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2811         m_jit.convertFloatToDouble(resultReg, resultReg);
2812         break;
2813     case 8: {
2814         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2815         break;
2816     }
2817     default:
2818         RELEASE_ASSERT_NOT_REACHED();
2819     }
2820     
2821     doubleResult(resultReg, node);
2822 }
2823
2824 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2825 {
2826     ASSERT(isFloat(type));
2827     
2828     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2829     GPRReg storageReg = storage.gpr();
2830     
2831     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2832     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2833
2834     SpeculateDoubleOperand valueOp(this, valueUse);
2835     FPRTemporary scratch(this);
2836     FPRReg valueFPR = valueOp.fpr();
2837     FPRReg scratchFPR = scratch.fpr();
2838
2839     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2840     
2841     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2842     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2843         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2844         outOfBounds = MacroAssembler::Jump();
2845     }
2846     
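     // Float32 stores narrow the incoming double to a float in the scratch FPR; Float64 stores write the double directly.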
2847     switch (elementSize(type)) {
2848     case 4: {
2850         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2851         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2852         break;
2853     }
2854     case 8:
2855         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2856         break;
2857     default:
2858         RELEASE_ASSERT_NOT_REACHED();
2859     }
2860     if (outOfBounds.isSet())
2861         outOfBounds.link(&m_jit);
2862     noResult(node);
2863 }
2864
2865 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2866 {
2867     // Check that the prototype is an object.
2868     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2869     
2870     // Initialize scratchReg with the value being checked.
2871     m_jit.move(valueReg, scratchReg);
2872     
2873     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2874     MacroAssembler::Label loop(&m_jit);
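     // If the object currently being examined is a Proxy, fall back to operationDefaultHasInstance.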
2875     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2876         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2877     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2878     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2879     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2880 #if USE(JSVALUE64)
2881     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2882 #else
2883     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2884 #endif
2885     
2886     // No match - result is false.
2887 #if USE(JSVALUE64)
2888     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2889 #else
2890     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2891 #endif
2892     MacroAssembler::JumpList doneJumps; 
2893     doneJumps.append(m_jit.jump());
2894
2895     performDefaultHasInstance.link(&m_jit);
2896     silentSpillAllRegisters(scratchReg);
2897     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2898     silentFillAllRegisters(scratchReg);
2899     m_jit.exceptionCheck();
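     // On 64-bit, the operation returns a raw boolean in scratchReg; OR in ValueFalse to box it as an encoded JSValue boolean.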
2900 #if USE(JSVALUE64)
2901     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2902 #endif
2903     doneJumps.append(m_jit.jump());
2904     
2905     isInstance.link(&m_jit);
2906 #if USE(JSVALUE64)
2907     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2908 #else
2909     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2910 #endif
2911     
2912     doneJumps.link(&m_jit);
2913 }
2914
2915 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2916 {
2917     SpeculateCellOperand base(this, node->child1());
2918
2919     GPRReg baseGPR = base.gpr();
2920
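     // Speculation failure (OSR exit) if none of the requested type-info flag bits are set on the base cell.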
2921     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2922
2923     noResult(node);
2924 }
2925
2926 void SpeculativeJIT::compileInstanceOf(Node* node)
2927 {
2928     if (node->child1().useKind() == UntypedUse) {
2929         // It might not be a cell. Speculate less aggressively.
2930         // Or: it might only be used once (i.e. by us), so we get zero benefit
2931         // from speculating any more aggressively than we absolutely need to.
2932         
2933         JSValueOperand value(this, node->child1());
2934         SpeculateCellOperand prototype(this, node->child2());
2935         GPRTemporary scratch(this);
2936         GPRTemporary scratch2(this);
2937         
2938         GPRReg prototypeReg = prototype.gpr();
2939         GPRReg scratchReg = scratch.gpr();
2940         GPRReg scratch2Reg = scratch2.gpr();
2941         
2942         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2943         GPRReg valueReg = value.jsValueRegs().payloadGPR();
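         // A non-cell value can never be an instance, so the result is false.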
2944         moveFalseTo(scratchReg);
2945
2946         MacroAssembler::Jump done = m_jit.jump();
2947         
2948         isCell.link(&m_jit);
2949         
2950         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2951         
2952         done.link(&m_jit);
2953
2954         blessedBooleanResult(scratchReg, node);
2955         return;
2956     }
2957     
2958     SpeculateCellOperand value(this, node->child1());
2959     SpeculateCellOperand prototype(this, node->child2());
2960     
2961     GPRTemporary scratch(this);
2962     GPRTemporary scratch2(this);
2963     
2964     GPRReg valueReg = value.gpr();
2965     GPRReg prototypeReg = prototype.gpr();
2966     GPRReg scratchReg = scratch.gpr();
2967     GPRReg scratch2Reg = scratch2.gpr();
2968     
2969     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2970
2971     blessedBooleanResult(scratchReg, node);
2972 }
2973
2974 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2975 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2976 {
2977     Edge& leftChild = node->child1();
2978     Edge& rightChild = node->child2();
2979
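     // If either operand is known not to be a number, the inline fast path can never succeed; go straight to the slow-path operation.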
2980     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2981         JSValueOperand left(this, leftChild);
2982         JSValueOperand right(this, rightChild);
2983         JSValueRegs leftRegs = left.jsValueRegs();
2984         JSValueRegs rightRegs = right.jsValueRegs();
2985 #if USE(JSVALUE64)
2986         GPRTemporary result(this);
2987         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2988 #else
2989         GPRTemporary resultTag(this);
2990         GPRTemporary resultPayload(this);
2991         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2992 #endif
2993         flushRegisters();
2994         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2995         m_jit.exceptionCheck();
2996
2997         jsValueResult(resultRegs, node);
2998         return;
2999     }
3000
3001     Optional<JSValueOperand> left;
3002     Optional<JSValueOperand> right;
3003
3004     JSValueRegs leftRegs;
3005     JSValueRegs rightRegs;
3006
3007 #if USE(JSVALUE64)
3008     GPRTemporary result(this);
3009     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3010     GPRTemporary scratch(this);
3011     GPRReg scratchGPR = scratch.gpr();
3012 #else
3013     GPRTemporary resultTag(this);
3014     GPRTemporary resultPayload(this);
3015     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3016     GPRReg scratchGPR = resultTag.gpr();
3017 #endif
3018
3019     SnippetOperand leftOperand;
3020     SnippetOperand rightOperand;
3021
3022     // The snippet generator does not support both operands being constant. If the left
3023     // operand is already const, we'll ignore the right operand's constness.
3024     if (leftChild->isInt32Constant())
3025         leftOperand.setConstInt32(leftChild->asInt32());
3026     else if (rightChild->isInt32Constant())
3027         rightOperand.setConstInt32(rightChild->asInt32());
3028
3029     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3030
3031     if (!leftOperand.isConst()) {
3032         left = JSValueOperand(this, leftChild);
3033         leftRegs = left->jsValueRegs();
3034     }
3035     if (!rightOperand.isConst()) {
3036         right = JSValueOperand(this, rightChild);
3037         rightRegs = right->jsValueRegs();
3038     }
3039
3040     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3041     gen.generateFastPath(m_jit);
3042
3043     ASSERT(gen.didEmitFastPath());
3044     gen.endJumpList().append(m_jit.jump());
3045
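     // Slow path: if one operand was a constant, materialize it into the result registers (which are free at this point) before calling the operation.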
3046     gen.slowPathJumpList().link(&m_jit);
3047     silentSpillAllRegisters(resultRegs);
3048
3049     if (leftOperand.isConst()) {
3050         leftRegs = resultRegs;
3051         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3052     } else if (rightOperand.isConst()) {
3053         rightRegs = resultRegs;
3054         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3055     }
3056
3057     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3058
3059     silentFillAllRegisters(resultRegs);
3060     m_jit.exceptionCheck();
3061
3062     gen.endJumpList().link(&m_jit);
3063     jsValueResult(resultRegs, node);
3064 }
3065
3066 void SpeculativeJIT::compileBitwiseOp(Node* node)
3067 {
3068     NodeType op = node->op();
3069     Edge& leftChild = node->child1();
3070     Edge& rightChild = node->child2();
3071
3072     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3073         switch (op) {
3074         case BitAnd:
3075             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3076             return;
3077         case BitOr:
3078             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3079             return;
3080         case BitXor:
3081             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3082             return;
3083         default:
3084             RELEASE_ASSERT_NOT_REACHED();
3085         }
3086     }
3087
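     // Typed (Int32) cases: fold an int32-constant operand into the immediate form of the bit operation when possible.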
3088     if (leftChild->isInt32Constant()) {
3089         SpeculateInt32Operand op2(this, rightChild);
3090         GPRTemporary result(this, Reuse, op2);
3091
3092         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3093
3094         int32Result(result.gpr(), node);
3095
3096     } else if (rightChild->isInt32Constant()) {
3097         SpeculateInt32Operand op1(this, leftChild);
3098         GPRTemporary result(this, Reuse, op1);
3099
3100         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3101
3102         int32Result(result.gpr(), node);
3103
3104     } else {
3105         SpeculateInt32Operand op1(this, leftChild);
3106         SpeculateInt32Operand op2(this, rightChild);
3107         GPRTemporary result(this, Reuse, op1, op2);
3108         
3109         GPRReg reg1 = op1.gpr();
3110         GPRReg reg2 = op2.gpr();
3111         bitOp(op, reg1, reg2, result.gpr());
3112         
3113         int32Result(result.gpr(), node);
3114     }
3115 }
3116
3117 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3118 {
3119     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3120         ? operationValueBitRShift : operationValueBitURShift;
3121     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3122         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3123
3124     Edge& leftChild = node->child1();
3125     Edge& rightChild = node->child2();
3126
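     // As in emitUntypedBitOp: operands known not to be numbers bypass the snippet and call the operation directly.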
3127     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3128         JSValueOperand left(this, leftChild);
3129         JSValueOperand right(this, rightChild);
3130         JSValueRegs leftRegs = left.jsValueRegs();
3131         JSValueRegs rightRegs = right.jsValueRegs();
3132 #if USE(JSVALUE64)
3133         GPRTemporary result(this);
3134         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3135 #else
3136         GPRTemporary resultTag(this);
3137         GPRTemporary resultPayload(this);
3138         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3139 #endif
3140         flushRegisters();
3141         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3142         m_jit.exceptionCheck();
3143
3144         jsValueResult(resultRegs, node);
3145         return;
3146     }
3147
3148     Optional<JSValueOperand> left;
3149     Optional<JSValueOperand> right;
3150
3151     JSValueRegs leftRegs;
3152     JSValueRegs rightRegs;
3153
3154     FPRTemporary leftNumber(this);
3155     FPRReg leftFPR = leftNumber.fpr();
3156
3157 #if USE(JSVALUE64)
3158     GPRTemporary result(this);
3159     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3160     GPRTemporary scratch(this);
3161     GPRReg scratchGPR = scratch.gpr();
3162     FPRReg scratchFPR = InvalidFPRReg;
3163 #else
3164     GPRTemporary resultTag(this);
3165     GPRTemporary resultPayload(this);
3166     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3167     GPRReg scratchGPR = resultTag.gpr();
3168     FPRTemporary fprScratch(this);
3169     FPRReg scratchFPR = fprScratch.fpr();
3170 #endif
3171
3172     SnippetOperand leftOperand;
3173     SnippetOperand rightOperand;
3174
3175     // The snippet generator does not support both operands being constant. If the left
3176     // operand is already const, we'll ignore the right operand's constness.
3177     if (leftChild->isInt32Constant())
3178         leftOperand.setConstInt32(leftChild->asInt32());
3179     else if (rightChild->isInt32Constant())
3180         rightOperand.setConstInt32(rightChild->asInt32());
3181
3182     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3183
3184     if (!leftOperand.isConst()) {
3185         left = JSValueOperand(this, leftChild);
3186         leftRegs = left->jsValueRegs();
3187     }
3188     if (!rightOperand.isConst()) {
3189         right = JSValueOperand(this, rightChild);
3190         rightRegs = right->jsValueRegs();
3191     }
3192
3193     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3194         leftFPR, scratchGPR, scratchFPR, shiftType);
3195     gen.generateFastPath(m_jit);
3196
3197     ASSERT(gen.didEmitFastPath());
3198     gen.endJumpList().append(m_jit.jump());
3199
3200     gen.slowPathJumpList().link(&m_jit);
3201     silentSpillAllRegisters(resultRegs);
3202
3203     if (leftOperand.isConst()) {
3204         leftRegs = resultRegs;
3205         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3206     } else if (rightOperand.isConst()) {
3207         rightRegs = resultRegs;
3208         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3209     }
3210
3211     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3212
3213     silentFillAllRegisters(resultRegs);
3214     m_jit.exceptionCheck();
3215
3216     gen.endJumpList().link(&m_jit);
3217     jsValueResult(resultRegs, node);
3219 }
3220
3221 void SpeculativeJIT::compileShiftOp(Node* node)
3222 {
3223     NodeType op = node->op();
3224     Edge& leftChild = node->child1();
3225     Edge& rightChild = node->child2();
3226
3227     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3228         switch (op) {
3229         case BitLShift:
3230             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3231             return;
3232         case BitRShift:
3233         case BitURShift:
3234             emitUntypedRightShiftBitOp(node);
3235             return;
3236         default:
3237             RELEASE_ASSERT_NOT_REACHED();
3238         }
3239     }
3240
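     // A constant shift amount is masked to its low five bits, matching ECMAScript shift semantics.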
3241     if (rightChild->isInt32Constant()) {
3242         SpeculateInt32Operand op1(this, leftChild);
3243         GPRTemporary result(this, Reuse, op1);
3244
3245         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3246
3247         int32Result(result.gpr(), node);
3248     } else {
3249         // Do not allow the shift amount to be reused as the result; the MacroAssembler does not permit this.
3250         SpeculateInt32Operand op1(this, leftChild);
3251         SpeculateInt32Operand op2(this, rightChild);
3252         GPRTemporary result(this, Reuse, op1);
3253
3254         GPRReg reg1 = op1.gpr();
3255         GPRReg reg2 = op2.gpr();
3256         shiftOp(op, reg1, reg2, result.gpr());
3257
3258         int32Result(result.gpr(), node);
3259     }
3260 }
3261
3262 void SpeculativeJIT::compileValueAdd(Node* node)
3263 {
3264     Edge& leftChild = node->child1();
3265     Edge& rightChild = node->child2();
3266
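     // If either operand is known not to be a number, ValueAdd cannot be numeric; call the not-number add operation directly.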
3267     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3268         JSValueOperand left(this, leftChild);
3269         JSValueOperand right(this, rightChild);
3270         JSValueRegs leftRegs = left.jsValueRegs();
3271         JSValueRegs rightRegs = right.jsValueRegs();
3272 #if USE(JSVALUE64)
3273         GPRTemporary result(this);
3274         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3275 #else
3276         GPRTemporary resultTag(this);
3277         GPRTemporary resultPayload(this);
3278         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3279 #endif
3280         flushRegisters();
3281         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3282         m_jit.exceptionCheck();
3283     
3284         jsValueResult(resultRegs, node);
3285         return;
3286     }
3287
3288     Optional<JSValueOperand> left;
3289     Optional<JSValueOperand> right;
3290
3291     JSValueRegs leftRegs;
3292     JSValueRegs rightRegs;
3293
3294     FPRTemporary leftNumber(this);
3295     FPRTemporary rightNumber(this);
3296     FPRReg leftFPR = leftNumber.fpr();
3297     FPRReg rightFPR = rightNumber.fpr();
3298
3299 #if USE(JSVALUE64)
3300     GPRTemporary result(this);
3301     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3302     GPRTemporary scratch(this);
3303     GPRReg scratchGPR = scratch.gpr();
3304     FPRReg scratchFPR = InvalidFPRReg;
3305 #else
3306     GPRTemporary resultTag(this);
3307     GPRTemporary resultPayload(this);
3308     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3309     GPRReg scratchGPR = resultTag.gpr();
3310     FPRTemporary fprScratch(this);
3311     FPRReg scratchFPR = fprScratch.fpr();
3312 #endif
3313
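     // Unlike the bit ops above, the add snippet is seeded with the operands' abstract result types so it can tailor its type checks.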
3314     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3315     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3316
3317     // The snippet generator does not support both operands being constant. If the left
3318     // operand is already const, we'll ignore the right operand's constness.
3319     if (leftChild->isInt32Constant())
3320         leftOperand.setConstInt32(leftChild->asInt32());
3321     else if (rightChild->isInt32Constant())
3322         rightOperand.setConstInt32(rightChild->asInt32());
3323
3324     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3325
3326     if (!leftOperand.isConst()) {
3327         left = JSValueOperand(this, leftChild);
3328         leftRegs = left->jsValueRegs();
3329     }
3330     if (!rightOperand.isConst()) {
3331         right = JSValueOperand(this, rightChild);
3332         rightRegs = right->jsValueRegs();
3333     }
3334
3335     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3336         leftFPR, rightFPR, scratchGPR, scratchFPR);
3337     gen.generateFastPath(m_jit);
3338
3339     ASSERT(gen.didEmitFastPath());
3340     gen.endJumpList().append(m_jit.jump());
3341
3342     gen.slowPathJumpList().link(&m_jit);
3343
3344     silentSpillAllRegisters(resultRegs);
3345
3346     if (leftOperand.isConst()) {
3347         leftRegs = resultRegs;
3348         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3349     } else if (rightOperand.isConst()) {
3350         rightRegs = resultRegs;
3351         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3352