[ES6] Add support for Symbol.isConcatSpreadable.
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (WebKit-https.git)
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
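// Fast-path allocation of a raw JSFinalObject together with its butterfly. The inline path
// allocates the indexed/out-of-line storage and the object cell directly in JIT code; any
// failure falls back to CallArrayAllocatorSlowPathGenerator, which calls operationNewRawObject.
// For double-shaped storage, unused vector slots are pre-filled with PNaN holes.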
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
82 {
83     IndexingType indexingType = structure->indexingType();
84     bool hasIndexingHeader = hasIndexedProperties(indexingType);
85
86     unsigned inlineCapacity = structure->inlineCapacity();
87     unsigned outOfLineCapacity = structure->outOfLineCapacity();
88     
89     GPRTemporary scratch(this);
90     GPRTemporary scratch2(this);
91     GPRReg scratchGPR = scratch.gpr();
92     GPRReg scratch2GPR = scratch2.gpr();
93
94     ASSERT(vectorLength >= numElements);
95     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
96     
97     JITCompiler::JumpList slowCases;
98
99     size_t size = 0;
100     if (hasIndexingHeader)
101         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102     size += outOfLineCapacity * sizeof(JSValue);
103
104     if (size) {
105         slowCases.append(
106             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
107         if (hasIndexingHeader)
108             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
109         else
110             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
111     } else
112         m_jit.move(TrustedImmPtr(0), storageGPR);
113
114     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
118
119     if (hasIndexingHeader)
120         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121
122     // I want a slow path that also loads out the storage pointer, and that's
123     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124     // of work for a very small piece of functionality. :-/
125     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127         structure, vectorLength));
128
129     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
130 #if USE(JSVALUE64)
131         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132         for (unsigned i = numElements; i < vectorLength; ++i)
133             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
134 #else
135         EncodedValueDescriptor value;
136         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137         for (unsigned i = numElements; i < vectorLength; ++i) {
138             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
140         }
141 #endif
142     }
143     
144     if (hasIndexingHeader)
145         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
146 }
147
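// Loads the argument count of the given (possibly inlined) call frame into lengthGPR. For a
// non-varargs inline call frame the count is a compile-time constant; otherwise it is loaded
// from the frame's ArgumentCount slot, optionally excluding 'this'.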
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
149 {
150     if (inlineCallFrame && !inlineCallFrame->isVarargs())
151         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
152     else {
153         VirtualRegister argumentCountRegister;
154         if (!inlineCallFrame)
155             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
156         else
157             argumentCountRegister = inlineCallFrame->argumentCountRegister;
158         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
159         if (!includeThis)
160             m_jit.sub32(TrustedImm32(1), lengthGPR);
161     }
162 }
163
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
165 {
166     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
167 }
168
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
170 {
171     if (origin.inlineCallFrame) {
172         if (origin.inlineCallFrame->isClosureCall) {
173             m_jit.loadPtr(
174                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
175                 calleeGPR);
176         } else {
177             m_jit.move(
178                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
179                 calleeGPR);
180         }
181     } else
182         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
183 }
184
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
186 {
187     m_jit.addPtr(
188         TrustedImm32(
189             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190         GPRInfo::callFrameRegister, startGPR);
191 }
192
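// OSR exit fuzzing: bumps the global counter of fuzz checks and, once the configured
// fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter threshold is reached, returns a jump that
// forces the exit to be taken. Returns an unset Jump when fuzzing is disabled.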
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
194 {
195     if (!doOSRExitFuzzing())
196         return MacroAssembler::Jump();
197     
198     MacroAssembler::Jump result;
199     
200     m_jit.pushToSave(GPRInfo::regT0);
201     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205     unsigned at = Options::fireOSRExitFuzzAt();
206     if (at || atOrAfter) {
207         unsigned threshold;
208         MacroAssembler::RelationalCondition condition;
209         if (atOrAfter) {
210             threshold = atOrAfter;
211             condition = MacroAssembler::Below;
212         } else {
213             threshold = at;
214             condition = MacroAssembler::NotEqual;
215         }
216         MacroAssembler::Jump ok = m_jit.branch32(
217             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218         m_jit.popToRestore(GPRInfo::regT0);
219         result = m_jit.jump();
220         ok.link(&m_jit);
221     }
222     m_jit.popToRestore(GPRInfo::regT0);
223     
224     return result;
225 }
226
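// The speculationCheck() family records an OSR exit: the failing jump (or jump list) is
// attached to the current exit info and a matching OSRExit entry is appended to the JIT
// code. If exit fuzzing is active, the fuzz jump is folded into the same exit.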
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
228 {
229     if (!m_compileOkay)
230         return;
231     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232     if (fuzzJump.isSet()) {
233         JITCompiler::JumpList jumpsToFail;
234         jumpsToFail.append(fuzzJump);
235         jumpsToFail.append(jumpToFail);
236         m_jit.appendExitInfo(jumpsToFail);
237     } else
238         m_jit.appendExitInfo(jumpToFail);
239     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
243 {
244     if (!m_compileOkay)
245         return;
246     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247     if (fuzzJump.isSet()) {
248         JITCompiler::JumpList myJumpsToFail;
249         myJumpsToFail.append(jumpsToFail);
250         myJumpsToFail.append(fuzzJump);
251         m_jit.appendExitInfo(myJumpsToFail);
252     } else
253         m_jit.appendExitInfo(jumpsToFail);
254     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
255 }
256
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
258 {
259     if (!m_compileOkay)
260         return OSRExitJumpPlaceholder();
261     unsigned index = m_jit.jitCode()->osrExit.size();
262     m_jit.appendExitInfo();
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264     return OSRExitJumpPlaceholder(index);
265 }
266
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
268 {
269     return speculationCheck(kind, jsValueSource, nodeUse.node());
270 }
271
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
273 {
274     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
275 }
276
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
278 {
279     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
280 }
281
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
283 {
284     if (!m_compileOkay)
285         return;
286     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287     m_jit.appendExitInfo(jumpToFail);
288     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
294 }
295
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
297 {
298     if (!m_compileOkay)
299         return;
300     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301     m_jit.jitCode()->appendOSRExit(OSRExit(
302         UncountableInvalidation, JSValueSource(),
303         m_jit.graph().methodOfGettingAValueProfileFor(node),
304         this, m_stream->size()));
305     info.m_replacementSource = m_jit.watchpointLabel();
306     ASSERT(info.m_replacementSource.isSet());
307     noResult(node);
308 }
309
310 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
311 {
312     if (!m_compileOkay)
313         return;
314     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
315     m_compileOkay = false;
316     if (verboseCompilationEnabled())
317         dataLog("Bailing compilation.\n");
318 }
319
320 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
321 {
322     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
323 }
324
325 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
326 {
327     ASSERT(needsTypeCheck(edge, typesPassedThrough));
328     m_interpreter.filter(edge, typesPassedThrough);
329     speculationCheck(exitKind, source, edge.node(), jumpToFail);
330 }
331
332 RegisterSet SpeculativeJIT::usedRegisters()
333 {
334     RegisterSet result;
335     
336     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
337         GPRReg gpr = GPRInfo::toRegister(i);
338         if (m_gprs.isInUse(gpr))
339             result.set(gpr);
340     }
341     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
342         FPRReg fpr = FPRInfo::toRegister(i);
343         if (m_fprs.isInUse(fpr))
344             result.set(fpr);
345     }
346     
347     result.merge(RegisterSet::stubUnavailableRegisters());
348     
349     return result;
350 }
351
352 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
353 {
354     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
355 }
356
357 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
358 {
359     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) {
360         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic);
361         m_slowPathGenerators[i]->generate(this);
362     }
363 }
364
365 // On Windows we need to wrap fmod; on other platforms we can call it directly.
366 // On ARMv7 we assert that all function pointers have the low bit set (they point to Thumb code).
367 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
368 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
369 {
370     return fmod(x, y);
371 }
372 #else
373 #define fmodAsDFGOperation fmod
374 #endif
375
376 void SpeculativeJIT::clearGenerationInfo()
377 {
378     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
379         m_generationInfo[i] = GenerationInfo();
380     m_gprs = RegisterBank<GPRInfo>();
381     m_fprs = RegisterBank<FPRInfo>();
382 }
383
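// "Silent" spill/fill plans describe how to preserve a live register across a call without
// changing the recorded register-allocation state. The spill action depends on the value's
// current DataFormat; the fill action can rematerialize constants instead of reloading them.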
384 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
385 {
386     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
387     Node* node = info.node();
388     DataFormat registerFormat = info.registerFormat();
389     ASSERT(registerFormat != DataFormatNone);
390     ASSERT(registerFormat != DataFormatDouble);
391         
392     SilentSpillAction spillAction;
393     SilentFillAction fillAction;
394         
395     if (!info.needsSpill())
396         spillAction = DoNothingForSpill;
397     else {
398 #if USE(JSVALUE64)
399         ASSERT(info.gpr() == source);
400         if (registerFormat == DataFormatInt32)
401             spillAction = Store32Payload;
402         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
403             spillAction = StorePtr;
404         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
405             spillAction = Store64;
406         else {
407             ASSERT(registerFormat & DataFormatJS);
408             spillAction = Store64;
409         }
410 #elif USE(JSVALUE32_64)
411         if (registerFormat & DataFormatJS) {
412             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
413             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
414         } else {
415             ASSERT(info.gpr() == source);
416             spillAction = Store32Payload;
417         }
418 #endif
419     }
420         
421     if (registerFormat == DataFormatInt32) {
422         ASSERT(info.gpr() == source);
423         ASSERT(isJSInt32(info.registerFormat()));
424         if (node->hasConstant()) {
425             ASSERT(node->isInt32Constant());
426             fillAction = SetInt32Constant;
427         } else
428             fillAction = Load32Payload;
429     } else if (registerFormat == DataFormatBoolean) {
430 #if USE(JSVALUE64)
431         RELEASE_ASSERT_NOT_REACHED();
432 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
433         fillAction = DoNothingForFill;
434 #endif
435 #elif USE(JSVALUE32_64)
436         ASSERT(info.gpr() == source);
437         if (node->hasConstant()) {
438             ASSERT(node->isBooleanConstant());
439             fillAction = SetBooleanConstant;
440         } else
441             fillAction = Load32Payload;
442 #endif
443     } else if (registerFormat == DataFormatCell) {
444         ASSERT(info.gpr() == source);
445         if (node->hasConstant()) {
446             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
447             node->asCell(); // To get the assertion.
448             fillAction = SetCellConstant;
449         } else {
450 #if USE(JSVALUE64)
451             fillAction = LoadPtr;
452 #else
453             fillAction = Load32Payload;
454 #endif
455         }
456     } else if (registerFormat == DataFormatStorage) {
457         ASSERT(info.gpr() == source);
458         fillAction = LoadPtr;
459     } else if (registerFormat == DataFormatInt52) {
460         if (node->hasConstant())
461             fillAction = SetInt52Constant;
462         else if (info.spillFormat() == DataFormatInt52)
463             fillAction = Load64;
464         else if (info.spillFormat() == DataFormatStrictInt52)
465             fillAction = Load64ShiftInt52Left;
466         else if (info.spillFormat() == DataFormatNone)
467             fillAction = Load64;
468         else {
469             RELEASE_ASSERT_NOT_REACHED();
470 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
471             fillAction = Load64; // Make GCC happy.
472 #endif
473         }
474     } else if (registerFormat == DataFormatStrictInt52) {
475         if (node->hasConstant())
476             fillAction = SetStrictInt52Constant;
477         else if (info.spillFormat() == DataFormatInt52)
478             fillAction = Load64ShiftInt52Right;
479         else if (info.spillFormat() == DataFormatStrictInt52)
480             fillAction = Load64;
481         else if (info.spillFormat() == DataFormatNone)
482             fillAction = Load64;
483         else {
484             RELEASE_ASSERT_NOT_REACHED();
485 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
486             fillAction = Load64; // Make GCC happy.
487 #endif
488         }
489     } else {
490         ASSERT(registerFormat & DataFormatJS);
491 #if USE(JSVALUE64)
492         ASSERT(info.gpr() == source);
493         if (node->hasConstant()) {
494             if (node->isCellConstant())
495                 fillAction = SetTrustedJSConstant;
496             else
497                 fillAction = SetJSConstant;
498         } else if (info.spillFormat() == DataFormatInt32) {
499             ASSERT(registerFormat == DataFormatJSInt32);
500             fillAction = Load32PayloadBoxInt;
501         } else
502             fillAction = Load64;
503 #else
504         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
505         if (node->hasConstant())
506             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
507         else if (info.payloadGPR() == source)
508             fillAction = Load32Payload;
509         else { // Fill the Tag
510             switch (info.spillFormat()) {
511             case DataFormatInt32:
512                 ASSERT(registerFormat == DataFormatJSInt32);
513                 fillAction = SetInt32Tag;
514                 break;
515             case DataFormatCell:
516                 ASSERT(registerFormat == DataFormatJSCell);
517                 fillAction = SetCellTag;
518                 break;
519             case DataFormatBoolean:
520                 ASSERT(registerFormat == DataFormatJSBoolean);
521                 fillAction = SetBooleanTag;
522                 break;
523             default:
524                 fillAction = Load32Tag;
525                 break;
526             }
527         }
528 #endif
529     }
530         
531     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
532 }
533     
534 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
535 {
536     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
537     Node* node = info.node();
538     ASSERT(info.registerFormat() == DataFormatDouble);
539
540     SilentSpillAction spillAction;
541     SilentFillAction fillAction;
542         
543     if (!info.needsSpill())
544         spillAction = DoNothingForSpill;
545     else {
546         ASSERT(!node->hasConstant());
547         ASSERT(info.spillFormat() == DataFormatNone);
548         ASSERT(info.fpr() == source);
549         spillAction = StoreDouble;
550     }
551         
552 #if USE(JSVALUE64)
553     if (node->hasConstant()) {
554         node->asNumber(); // To get the assertion.
555         fillAction = SetDoubleConstant;
556     } else {
557         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
558         fillAction = LoadDouble;
559     }
560 #elif USE(JSVALUE32_64)
561     ASSERT(info.registerFormat() == DataFormatDouble);
562     if (node->hasConstant()) {
563         node->asNumber(); // To get the assertion.
564         fillAction = SetDoubleConstant;
565     } else
566         fillAction = LoadDouble;
567 #endif
568
569     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
570 }
571     
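// silentSpill() performs the store half of a plan; silentFill() below performs the matching
// reload. canTrample is a scratch GPR, used when materializing double constants on 64-bit.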
572 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
573 {
574     switch (plan.spillAction()) {
575     case DoNothingForSpill:
576         break;
577     case Store32Tag:
578         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
579         break;
580     case Store32Payload:
581         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
582         break;
583     case StorePtr:
584         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
585         break;
586 #if USE(JSVALUE64)
587     case Store64:
588         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
589         break;
590 #endif
591     case StoreDouble:
592         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
593         break;
594     default:
595         RELEASE_ASSERT_NOT_REACHED();
596     }
597 }
598     
599 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
600 {
601 #if USE(JSVALUE32_64)
602     UNUSED_PARAM(canTrample);
603 #endif
604     switch (plan.fillAction()) {
605     case DoNothingForFill:
606         break;
607     case SetInt32Constant:
608         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
609         break;
610 #if USE(JSVALUE64)
611     case SetInt52Constant:
612         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
613         break;
614     case SetStrictInt52Constant:
615         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
616         break;
617 #endif // USE(JSVALUE64)
618     case SetBooleanConstant:
619         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
620         break;
621     case SetCellConstant:
622         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
623         break;
624 #if USE(JSVALUE64)
625     case SetTrustedJSConstant:
626         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
627         break;
628     case SetJSConstant:
629         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
630         break;
631     case SetDoubleConstant:
632         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
633         m_jit.move64ToDouble(canTrample, plan.fpr());
634         break;
635     case Load32PayloadBoxInt:
636         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
637         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
638         break;
639     case Load32PayloadConvertToInt52:
640         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
641         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
642         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
643         break;
644     case Load32PayloadSignExtend:
645         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
646         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
647         break;
648 #else
649     case SetJSConstantTag:
650         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
651         break;
652     case SetJSConstantPayload:
653         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
654         break;
655     case SetInt32Tag:
656         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
657         break;
658     case SetCellTag:
659         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
660         break;
661     case SetBooleanTag:
662         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
663         break;
664     case SetDoubleConstant:
665         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
666         break;
667 #endif
668     case Load32Tag:
669         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
670         break;
671     case Load32Payload:
672         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673         break;
674     case LoadPtr:
675         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
676         break;
677 #if USE(JSVALUE64)
678     case Load64:
679         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
680         break;
681     case Load64ShiftInt52Right:
682         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
683         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
684         break;
685     case Load64ShiftInt52Left:
686         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
687         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
688         break;
689 #endif
690     case LoadDouble:
691         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
692         break;
693     default:
694         RELEASE_ASSERT_NOT_REACHED();
695     }
696 }
697     
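// Given the object's indexing-type byte in tempGPR, returns a jump (or jump list) taken when
// the indexing type does not match what the ArrayMode expects. Callers use the result either
// as a speculation check or as the guard for an arrayify slow path.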
698 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
699 {
700     switch (arrayMode.arrayClass()) {
701     case Array::OriginalArray: {
702         CRASH();
703 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
704         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
705         return result;
706 #endif
707     }
708         
709     case Array::Array:
710         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
711         return m_jit.branch32(
712             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
713         
714     case Array::NonArray:
715     case Array::OriginalNonArray:
716         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
717         return m_jit.branch32(
718             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
719         
720     case Array::PossiblyArray:
721         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
722         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
723     }
724     
725     RELEASE_ASSERT_NOT_REACHED();
726     return JITCompiler::Jump();
727 }
728
729 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
730 {
731     JITCompiler::JumpList result;
732     
733     switch (arrayMode.type()) {
734     case Array::Int32:
735         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
736
737     case Array::Double:
738         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
739
740     case Array::Contiguous:
741         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
742
743     case Array::Undecided:
744         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
745
746     case Array::ArrayStorage:
747     case Array::SlowPutArrayStorage: {
748         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
749         
750         if (arrayMode.isJSArray()) {
751             if (arrayMode.isSlowPut()) {
752                 result.append(
753                     m_jit.branchTest32(
754                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
755                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
756                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
757                 result.append(
758                     m_jit.branch32(
759                         MacroAssembler::Above, tempGPR,
760                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
761                 break;
762             }
763             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
764             result.append(
765                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
766             break;
767         }
768         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
769         if (arrayMode.isSlowPut()) {
770             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
771             result.append(
772                 m_jit.branch32(
773                     MacroAssembler::Above, tempGPR,
774                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
775             break;
776         }
777         result.append(
778             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
779         break;
780     }
781     default:
782         CRASH();
783         break;
784     }
785     
786     return result;
787 }
788
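// CheckArray: verifies that the base cell already has the indexing type or cell type required
// by the node's ArrayMode, OSR-exiting on mismatch. No conversion happens here; modes that
// need conversion go through arrayify() instead.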
789 void SpeculativeJIT::checkArray(Node* node)
790 {
791     ASSERT(node->arrayMode().isSpecific());
792     ASSERT(!node->arrayMode().doesConversion());
793     
794     SpeculateCellOperand base(this, node->child1());
795     GPRReg baseReg = base.gpr();
796     
797     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
798         noResult(m_currentNode);
799         return;
800     }
801     
802     const ClassInfo* expectedClassInfo = 0;
803     
804     switch (node->arrayMode().type()) {
805     case Array::AnyTypedArray:
806     case Array::String:
807         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
808         break;
809     case Array::Int32:
810     case Array::Double:
811     case Array::Contiguous:
812     case Array::Undecided:
813     case Array::ArrayStorage:
814     case Array::SlowPutArrayStorage: {
815         GPRTemporary temp(this);
816         GPRReg tempGPR = temp.gpr();
817         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
818         speculationCheck(
819             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
820             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
821         
822         noResult(m_currentNode);
823         return;
824     }
825     case Array::DirectArguments:
826         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
827         noResult(m_currentNode);
828         return;
829     case Array::ScopedArguments:
830         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
831         noResult(m_currentNode);
832         return;
833     default:
834         speculateCellTypeWithoutTypeFiltering(
835             node->child1(), baseReg,
836             typeForTypedArrayType(node->arrayMode().typedArrayType()));
837         noResult(m_currentNode);
838         return;
839     }
840     
841     RELEASE_ASSERT(expectedClassInfo);
842     
843     GPRTemporary temp(this);
844     GPRTemporary temp2(this);
845     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
846     speculationCheck(
847         BadType, JSValueSource::unboxedCell(baseReg), node,
848         m_jit.branchPtr(
849             MacroAssembler::NotEqual,
850             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
851             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
852     
853     noResult(m_currentNode);
854 }
855
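// Arrayify / ArrayifyToStructure: if the base object does not already have the desired
// indexing type (or structure), jump to an ArrayifySlowPathGenerator that converts its
// storage; the fast path is just the indexing-type or structure check.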
856 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
857 {
858     ASSERT(node->arrayMode().doesConversion());
859     
860     GPRTemporary temp(this);
861     GPRTemporary structure;
862     GPRReg tempGPR = temp.gpr();
863     GPRReg structureGPR = InvalidGPRReg;
864     
865     if (node->op() != ArrayifyToStructure) {
866         GPRTemporary realStructure(this);
867         structure.adopt(realStructure);
868         structureGPR = structure.gpr();
869     }
870         
871     // We can skip all that comes next if we already have array storage.
872     MacroAssembler::JumpList slowPath;
873     
874     if (node->op() == ArrayifyToStructure) {
875         slowPath.append(m_jit.branchWeakStructure(
876             JITCompiler::NotEqual,
877             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
878             node->structure()));
879     } else {
880         m_jit.load8(
881             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
882         
883         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
884     }
885     
886     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
887         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
888     
889     noResult(m_currentNode);
890 }
891
892 void SpeculativeJIT::arrayify(Node* node)
893 {
894     ASSERT(node->arrayMode().isSpecific());
895     
896     SpeculateCellOperand base(this, node->child1());
897     
898     if (!node->child2()) {
899         arrayify(node, base.gpr(), InvalidGPRReg);
900         return;
901     }
902     
903     SpeculateInt32Operand property(this, node->child2());
904     
905     arrayify(node, base.gpr(), property.gpr());
906 }
907
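// Fills the storage (butterfly) pointer for an edge into a GPR, reloading it from its spill
// slot if necessary. Anything not already in storage format is filled as a cell.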
908 GPRReg SpeculativeJIT::fillStorage(Edge edge)
909 {
910     VirtualRegister virtualRegister = edge->virtualRegister();
911     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
912     
913     switch (info.registerFormat()) {
914     case DataFormatNone: {
915         if (info.spillFormat() == DataFormatStorage) {
916             GPRReg gpr = allocate();
917             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
918             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
919             info.fillStorage(*m_stream, gpr);
920             return gpr;
921         }
922         
923         // Must be a cell; fill it as a cell and then return the pointer.
924         return fillSpeculateCell(edge);
925     }
926         
927     case DataFormatStorage: {
928         GPRReg gpr = info.gpr();
929         m_gprs.lock(gpr);
930         return gpr;
931     }
932         
933     default:
934         return fillSpeculateCell(edge);
935     }
936 }
937
938 void SpeculativeJIT::useChildren(Node* node)
939 {
940     if (node->flags() & NodeHasVarArgs) {
941         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
942             if (!!m_jit.graph().m_varArgChildren[childIdx])
943                 use(m_jit.graph().m_varArgChildren[childIdx]);
944         }
945     } else {
946         Edge child1 = node->child1();
947         if (!child1) {
948             ASSERT(!node->child2() && !node->child3());
949             return;
950         }
951         use(child1);
952         
953         Edge child2 = node->child2();
954         if (!child2) {
955             ASSERT(!node->child3());
956             return;
957         }
958         use(child2);
959         
960         Edge child3 = node->child3();
961         if (!child3)
962             return;
963         use(child3);
964     }
965 }
966
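// Compiles the 'in' operator. When the property name is a constant atomic string, this emits
// a patchable inline cache (AccessType::In) backed by operationInOptimize; otherwise it
// flushes registers and calls operationGenericIn.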
967 void SpeculativeJIT::compileIn(Node* node)
968 {
969     SpeculateCellOperand base(this, node->child2());
970     GPRReg baseGPR = base.gpr();
971     
972     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
973         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
974             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
975             
976             GPRTemporary result(this);
977             GPRReg resultGPR = result.gpr();
978
979             use(node->child1());
980             
981             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
982             MacroAssembler::Label done = m_jit.label();
983             
984             // Since this block is executed only when string->tryGetValueImpl() returns an atomic string,
985             // we can cast it to const AtomicStringImpl* safely.
986             auto slowPath = slowPathCall(
987                 jump.m_jump, this, operationInOptimize,
988                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
989                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
990             
991             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
992             stubInfo->codeOrigin = node->origin.semantic;
993             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
994             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
995 #if USE(JSVALUE32_64)
996             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
997             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
998 #endif
999             stubInfo->patch.usedRegisters = usedRegisters();
1000
1001             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1002             addSlowPathGenerator(WTFMove(slowPath));
1003
1004             base.use();
1005
1006             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1007             return;
1008         }
1009     }
1010
1011     JSValueOperand key(this, node->child1());
1012     JSValueRegs regs = key.jsValueRegs();
1013         
1014     GPRFlushedCallResult result(this);
1015     GPRReg resultGPR = result.gpr();
1016         
1017     base.use();
1018     key.use();
1019         
1020     flushRegisters();
1021     callOperation(
1022         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1023         baseGPR, regs);
1024     m_jit.exceptionCheck();
1025     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1026 }
1027
1028 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1029 {
1030     unsigned branchIndexInBlock = detectPeepHoleBranch();
1031     if (branchIndexInBlock != UINT_MAX) {
1032         Node* branchNode = m_block->at(branchIndexInBlock);
1033
1034         ASSERT(node->adjustedRefCount() == 1);
1035         
1036         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1037     
1038         m_indexInBlock = branchIndexInBlock;
1039         m_currentNode = branchNode;
1040         
1041         return true;
1042     }
1043     
1044     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1045     
1046     return false;
1047 }
1048
1049 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1050 {
1051     unsigned branchIndexInBlock = detectPeepHoleBranch();
1052     if (branchIndexInBlock != UINT_MAX) {
1053         Node* branchNode = m_block->at(branchIndexInBlock);
1054
1055         ASSERT(node->adjustedRefCount() == 1);
1056         
1057         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1058     
1059         m_indexInBlock = branchIndexInBlock;
1060         m_currentNode = branchNode;
1061         
1062         return true;
1063     }
1064     
1065     nonSpeculativeNonPeepholeStrictEq(node, invert);
1066     
1067     return false;
1068 }
1069
1070 static const char* dataFormatString(DataFormat format)
1071 {
1072     // These values correspond to the DataFormat enum.
1073     const char* strings[] = {
1074         "[  ]",
1075         "[ i]",
1076         "[ d]",
1077         "[ c]",
1078         "Err!",
1079         "Err!",
1080         "Err!",
1081         "Err!",
1082         "[J ]",
1083         "[Ji]",
1084         "[Jd]",
1085         "[Jc]",
1086         "Err!",
1087         "Err!",
1088         "Err!",
1089         "Err!",
1090     };
1091     return strings[format];
1092 }
1093
1094 void SpeculativeJIT::dump(const char* label)
1095 {
1096     if (label)
1097         dataLogF("<%s>\n", label);
1098
1099     dataLogF("  gprs:\n");
1100     m_gprs.dump();
1101     dataLogF("  fprs:\n");
1102     m_fprs.dump();
1103     dataLogF("  VirtualRegisters:\n");
1104     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1105         GenerationInfo& info = m_generationInfo[i];
1106         if (info.alive())
1107             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1108         else
1109             dataLogF("    % 3d:[__][__]", i);
1110         if (info.registerFormat() == DataFormatDouble)
1111             dataLogF(":fpr%d\n", info.fpr());
1112         else if (info.registerFormat() != DataFormatNone
1113 #if USE(JSVALUE32_64)
1114             && !(info.registerFormat() & DataFormatJS)
1115 #endif
1116             ) {
1117             ASSERT(info.gpr() != InvalidGPRReg);
1118             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1119         } else
1120             dataLogF("\n");
1121     }
1122     if (label)
1123         dataLogF("</%s>\n", label);
1124 }
1125
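// GPRTemporary / FPRTemporary / JSValueRegsTemporary are small RAII-style helpers over the
// register allocator: constructing one allocates a scratch register (or reuses an operand's
// register when that is safe), and adopt() transfers ownership between temporaries.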
1126 GPRTemporary::GPRTemporary()
1127     : m_jit(0)
1128     , m_gpr(InvalidGPRReg)
1129 {
1130 }
1131
1132 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1133     : m_jit(jit)
1134     , m_gpr(InvalidGPRReg)
1135 {
1136     m_gpr = m_jit->allocate();
1137 }
1138
1139 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1140     : m_jit(jit)
1141     , m_gpr(InvalidGPRReg)
1142 {
1143     m_gpr = m_jit->allocate(specific);
1144 }
1145
1146 #if USE(JSVALUE32_64)
1147 GPRTemporary::GPRTemporary(
1148     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1149     : m_jit(jit)
1150     , m_gpr(InvalidGPRReg)
1151 {
1152     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1153         m_gpr = m_jit->reuse(op1.gpr(which));
1154     else
1155         m_gpr = m_jit->allocate();
1156 }
1157 #endif // USE(JSVALUE32_64)
1158
1159 JSValueRegsTemporary::JSValueRegsTemporary() { }
1160
1161 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1162 #if USE(JSVALUE64)
1163     : m_gpr(jit)
1164 #else
1165     : m_payloadGPR(jit)
1166     , m_tagGPR(jit)
1167 #endif
1168 {
1169 }
1170
1171 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1172
1173 JSValueRegs JSValueRegsTemporary::regs()
1174 {
1175 #if USE(JSVALUE64)
1176     return JSValueRegs(m_gpr.gpr());
1177 #else
1178     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1179 #endif
1180 }
1181
1182 void GPRTemporary::adopt(GPRTemporary& other)
1183 {
1184     ASSERT(!m_jit);
1185     ASSERT(m_gpr == InvalidGPRReg);
1186     ASSERT(other.m_jit);
1187     ASSERT(other.m_gpr != InvalidGPRReg);
1188     m_jit = other.m_jit;
1189     m_gpr = other.m_gpr;
1190     other.m_jit = 0;
1191     other.m_gpr = InvalidGPRReg;
1192 }
1193
1194 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1195     : m_jit(jit)
1196     , m_fpr(InvalidFPRReg)
1197 {
1198     m_fpr = m_jit->fprAllocate();
1199 }
1200
1201 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1202     : m_jit(jit)
1203     , m_fpr(InvalidFPRReg)
1204 {
1205     if (m_jit->canReuse(op1.node()))
1206         m_fpr = m_jit->reuse(op1.fpr());
1207     else
1208         m_fpr = m_jit->fprAllocate();
1209 }
1210
1211 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1212     : m_jit(jit)
1213     , m_fpr(InvalidFPRReg)
1214 {
1215     if (m_jit->canReuse(op1.node()))
1216         m_fpr = m_jit->reuse(op1.fpr());
1217     else if (m_jit->canReuse(op2.node()))
1218         m_fpr = m_jit->reuse(op2.fpr());
1219     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1220         m_fpr = m_jit->reuse(op1.fpr());
1221     else
1222         m_fpr = m_jit->fprAllocate();
1223 }
1224
1225 #if USE(JSVALUE32_64)
1226 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1227     : m_jit(jit)
1228     , m_fpr(InvalidFPRReg)
1229 {
1230     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1231         m_fpr = m_jit->reuse(op1.fpr());
1232     else
1233         m_fpr = m_jit->fprAllocate();
1234 }
1235 #endif
1236
1237 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1238 {
1239     BasicBlock* taken = branchNode->branchData()->taken.block;
1240     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1241     
1242     SpeculateDoubleOperand op1(this, node->child1());
1243     SpeculateDoubleOperand op2(this, node->child2());
1244     
1245     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1246     jump(notTaken);
1247 }
1248
1249 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1250 {
1251     BasicBlock* taken = branchNode->branchData()->taken.block;
1252     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1253
1254     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1255     
1256     if (taken == nextBlock()) {
1257         condition = MacroAssembler::NotEqual;
1258         BasicBlock* tmp = taken;
1259         taken = notTaken;
1260         notTaken = tmp;
1261     }
1262
1263     SpeculateCellOperand op1(this, node->child1());
1264     SpeculateCellOperand op2(this, node->child2());
1265     
1266     GPRReg op1GPR = op1.gpr();
1267     GPRReg op2GPR = op2.gpr();
1268     
1269     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1270         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1271             speculationCheck(
1272                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1273         }
1274         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1275             speculationCheck(
1276                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1277         }
1278     } else {
1279         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1280             speculationCheck(
1281                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1282                 m_jit.branchIfNotObject(op1GPR));
1283         }
1284         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1285             m_jit.branchTest8(
1286                 MacroAssembler::NonZero, 
1287                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1288                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1289
1290         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1291             speculationCheck(
1292                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1293                 m_jit.branchIfNotObject(op2GPR));
1294         }
1295         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1296             m_jit.branchTest8(
1297                 MacroAssembler::NonZero, 
1298                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1299                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1300     }
1301
1302     branchPtr(condition, op1GPR, op2GPR, taken);
1303     jump(notTaken);
1304 }
1305
1306 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1307 {
1308     BasicBlock* taken = branchNode->branchData()->taken.block;
1309     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1310
1311     // The branch instruction will branch to the taken block.
1312     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1313     if (taken == nextBlock()) {
1314         condition = JITCompiler::invert(condition);
1315         BasicBlock* tmp = taken;
1316         taken = notTaken;
1317         notTaken = tmp;
1318     }
1319
1320     if (node->child1()->isInt32Constant()) {
1321         int32_t imm = node->child1()->asInt32();
1322         SpeculateBooleanOperand op2(this, node->child2());
1323         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1324     } else if (node->child2()->isInt32Constant()) {
1325         SpeculateBooleanOperand op1(this, node->child1());
1326         int32_t imm = node->child2()->asInt32();
1327         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1328     } else {
1329         SpeculateBooleanOperand op1(this, node->child1());
1330         SpeculateBooleanOperand op2(this, node->child2());
1331         branch32(condition, op1.gpr(), op2.gpr(), taken);
1332     }
1333
1334     jump(notTaken);
1335 }
1336
1337 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1338 {
1339     BasicBlock* taken = branchNode->branchData()->taken.block;
1340     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1341
1342     // The branch instruction will branch to the taken block.
1343     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1344     if (taken == nextBlock()) {
1345         condition = JITCompiler::invert(condition);
1346         BasicBlock* tmp = taken;
1347         taken = notTaken;
1348         notTaken = tmp;
1349     }
1350
1351     if (node->child1()->isInt32Constant()) {
1352         int32_t imm = node->child1()->asInt32();
1353         SpeculateInt32Operand op2(this, node->child2());
1354         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1355     } else if (node->child2()->isInt32Constant()) {
1356         SpeculateInt32Operand op1(this, node->child1());
1357         int32_t imm = node->child2()->asInt32();
1358         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1359     } else {
1360         SpeculateInt32Operand op1(this, node->child1());
1361         SpeculateInt32Operand op2(this, node->child2());
1362         branch32(condition, op1.gpr(), op2.gpr(), taken);
1363     }
1364
1365     jump(notTaken);
1366 }
1367
1368 // Returns true if the compare is fused with a subsequent branch.
1369 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1370 {
1371     // Fused compare & branch.
1372     unsigned branchIndexInBlock = detectPeepHoleBranch();
1373     if (branchIndexInBlock != UINT_MAX) {
1374         Node* branchNode = m_block->at(branchIndexInBlock);
1375
1376         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1377         // so there can be no intervening nodes that also reference the compare.
1378         ASSERT(node->adjustedRefCount() == 1);
1379
1380         if (node->isBinaryUseKind(Int32Use))
1381             compilePeepHoleInt32Branch(node, branchNode, condition);
1382 #if USE(JSVALUE64)
1383         else if (node->isBinaryUseKind(Int52RepUse))
1384             compilePeepHoleInt52Branch(node, branchNode, condition);
1385 #endif // USE(JSVALUE64)
1386         else if (node->isBinaryUseKind(DoubleRepUse))
1387             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1388         else if (node->op() == CompareEq) {
1389             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1390                 // Use non-peephole comparison, for now.
1391                 return false;
1392             }
1393             if (node->isBinaryUseKind(BooleanUse))
1394                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1395             else if (node->isBinaryUseKind(SymbolUse))
1396                 compilePeepHoleSymbolEquality(node, branchNode);
1397             else if (node->isBinaryUseKind(ObjectUse))
1398                 compilePeepHoleObjectEquality(node, branchNode);
1399             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1400                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1401             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1402                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1403             else if (!needsTypeCheck(node->child1(), SpecOther))
1404                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1405             else if (!needsTypeCheck(node->child2(), SpecOther))
1406                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1407             else {
1408                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1409                 return true;
1410             }
1411         } else {
1412             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1413             return true;
1414         }
1415
1416         use(node->child1());
1417         use(node->child2());
1418         m_indexInBlock = branchIndexInBlock;
1419         m_currentNode = branchNode;
1420         return true;
1421     }
1422     return false;
1423 }
1424
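// Record in the variable event stream that this node's value has come into existence,
// so that OSR exit can reconstruct it from whatever representation it currently has.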
1425 void SpeculativeJIT::noticeOSRBirth(Node* node)
1426 {
1427     if (!node->hasVirtualRegister())
1428         return;
1429     
1430     VirtualRegister virtualRegister = node->virtualRegister();
1431     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1432     
1433     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1434 }
1435
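// A MovHint generates no code; it just tells OSR exit which bytecode variable the
// child node's value corresponds to, via a movHint event in the variable event stream.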
1436 void SpeculativeJIT::compileMovHint(Node* node)
1437 {
1438     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1439     
1440     Node* child = node->child1().node();
1441     noticeOSRBirth(child);
1442     
1443     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1444 }
1445
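// Give up on code generation for the current block: plant an abort with the given
// reason and throw away all register allocation state. Note that m_compileOkay is
// reset to true here, presumably so that the remaining blocks can still be processed.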
1446 void SpeculativeJIT::bail(AbortReason reason)
1447 {
1448     if (verboseCompilationEnabled())
1449         dataLog("Bailing compilation.\n");
1450     m_compileOkay = true;
1451     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1452     clearGenerationInfo();
1453 }
1454
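// Generate code for the current basic block: replay the abstract interpreter over the
// block's nodes, emit speculative code for each one, and log variable events for OSR
// exit as we go. Blocks that the CFA proved unreachable only get a breakpoint.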
1455 void SpeculativeJIT::compileCurrentBlock()
1456 {
1457     ASSERT(m_compileOkay);
1458     
1459     if (!m_block)
1460         return;
1461     
1462     ASSERT(m_block->isReachable);
1463     
1464     m_jit.blockHeads()[m_block->index] = m_jit.label();
1465
1466     if (!m_block->intersectionOfCFAHasVisited) {
1467         // Don't generate code for basic blocks that are unreachable according to CFA.
1468         // But to be sure that nobody has generated a jump to this block, drop in a
1469         // breakpoint here.
1470         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1471         return;
1472     }
1473
1474     m_stream->appendAndLog(VariableEvent::reset());
1475     
1476     m_jit.jitAssertHasValidCallFrame();
1477     m_jit.jitAssertTagsInPlace();
1478     m_jit.jitAssertArgumentCountSane();
1479
1480     m_state.reset();
1481     m_state.beginBasicBlock(m_block);
1482     
1483     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1484         int operand = m_block->variablesAtHead.operandForIndex(i);
1485         Node* node = m_block->variablesAtHead[i];
1486         if (!node)
1487             continue; // No need to record dead SetLocal's.
1488         
1489         VariableAccessData* variable = node->variableAccessData();
1490         DataFormat format;
1491         if (!node->refCount())
1492             continue; // No need to record dead SetLocal's.
1493         format = dataFormatFor(variable->flushFormat());
1494         m_stream->appendAndLog(
1495             VariableEvent::setLocal(
1496                 VirtualRegister(operand),
1497                 variable->machineLocal(),
1498                 format));
1499     }
1500
1501     m_origin = NodeOrigin();
1502     
1503     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1504         m_currentNode = m_block->at(m_indexInBlock);
1505         
1506         // We may have hit a contradiction that the CFA was aware of but that the JIT
1507         // didn't cause directly.
1508         if (!m_state.isValid()) {
1509             bail(DFGBailedAtTopOfBlock);
1510             return;
1511         }
1512
1513         m_interpreter.startExecuting();
1514         m_jit.setForNode(m_currentNode);
1515         m_origin = m_currentNode->origin;
1516         if (validationEnabled())
1517             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1518         m_lastGeneratedNode = m_currentNode->op();
1519         
1520         ASSERT(m_currentNode->shouldGenerate());
1521         
1522         if (verboseCompilationEnabled()) {
1523             dataLogF(
1524                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1525                 (int)m_currentNode->index(),
1526                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1527             dataLog("\n");
1528         }
1529
1530         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1531             m_jit.jitReleaseAssertNoException();
1532
1533         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1534
1535         compile(m_currentNode);
1536         
1537         if (belongsInMinifiedGraph(m_currentNode->op()))
1538             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1539         
1540 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1541         m_jit.clearRegisterAllocationOffsets();
1542 #endif
1543         
1544         if (!m_compileOkay) {
1545             bail(DFGBailedAtEndOfNode);
1546             return;
1547         }
1548         
1549         // Make sure that the abstract state is rematerialized for the next node.
1550         m_interpreter.executeEffects(m_indexInBlock);
1551     }
1552     
1553     // Perform the most basic verification that children have been used correctly.
1554     if (!ASSERT_DISABLED) {
1555         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1556             GenerationInfo& info = m_generationInfo[index];
1557             RELEASE_ASSERT(!info.alive());
1558         }
1559     }
1560 }
1561
1562 // If we are making type predictions about our arguments, then we need to check
1563 // that they are correct on function entry.
1564 void SpeculativeJIT::checkArgumentTypes()
1565 {
1566     ASSERT(!m_currentNode);
1567     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1568
1569     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1570         Node* node = m_jit.graph().m_arguments[i];
1571         if (!node) {
1572             // The argument is dead. We don't do any checks for such arguments.
1573             continue;
1574         }
1575         
1576         ASSERT(node->op() == SetArgument);
1577         ASSERT(node->shouldGenerate());
1578
1579         VariableAccessData* variableAccessData = node->variableAccessData();
1580         FlushFormat format = variableAccessData->flushFormat();
1581         
1582         if (format == FlushedJSValue)
1583             continue;
1584         
1585         VirtualRegister virtualRegister = variableAccessData->local();
1586
1587         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1588         
1589 #if USE(JSVALUE64)
1590         switch (format) {
1591         case FlushedInt32: {
1592             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1593             break;
1594         }
1595         case FlushedBoolean: {
1596             GPRTemporary temp(this);
1597             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1598             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1599             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1600             break;
1601         }
1602         case FlushedCell: {
1603             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1604             break;
1605         }
1606         default:
1607             RELEASE_ASSERT_NOT_REACHED();
1608             break;
1609         }
1610 #else
1611         switch (format) {
1612         case FlushedInt32: {
1613             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1614             break;
1615         }
1616         case FlushedBoolean: {
1617             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1618             break;
1619         }
1620         case FlushedCell: {
1621             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1622             break;
1623         }
1624         default:
1625             RELEASE_ASSERT_NOT_REACHED();
1626             break;
1627         }
1628 #endif
1629     }
1630
1631     m_origin = NodeOrigin();
1632 }
1633
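// Top-level driver: check the argument type predictions, generate code for every
// block in the graph, and then link the branches between blocks.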
1634 bool SpeculativeJIT::compile()
1635 {
1636     checkArgumentTypes();
1637     
1638     ASSERT(!m_currentNode);
1639     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1640         m_jit.setForBlockIndex(blockIndex);
1641         m_block = m_jit.graph().block(blockIndex);
1642         compileCurrentBlock();
1643     }
1644     linkBranches();
1645     return true;
1646 }
1647
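// Remember the code location of each block that is an OSR entry target, so that
// linkOSREntries() can register them with the JIT code once the LinkBuffer exists.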
1648 void SpeculativeJIT::createOSREntries()
1649 {
1650     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1651         BasicBlock* block = m_jit.graph().block(blockIndex);
1652         if (!block)
1653             continue;
1654         if (!block->isOSRTarget)
1655             continue;
1656         
1657         // Currently we don't have OSR entry trampolines. We could add them
1658         // here if need be.
1659         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1660     }
1661 }
1662
1663 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1664 {
1665     unsigned osrEntryIndex = 0;
1666     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1667         BasicBlock* block = m_jit.graph().block(blockIndex);
1668         if (!block)
1669             continue;
1670         if (!block->isOSRTarget)
1671             continue;
1672         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1673     }
1674     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1675     
1676     if (verboseCompilationEnabled()) {
1677         DumpContext dumpContext;
1678         dataLog("OSR Entries:\n");
1679         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1680             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1681         if (!dumpContext.isEmpty())
1682             dumpContext.dump(WTF::dataFile());
1683     }
1684 }
1685
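// PutByVal on a double array: the value is speculated to be a full real number (NaN
// is rejected), then stored straight into the butterfly, with the usual in-bounds /
// out-of-bounds handling and a slow path call for stores beyond the vector length.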
1686 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1687 {
1688     Edge child3 = m_jit.graph().varArgChild(node, 2);
1689     Edge child4 = m_jit.graph().varArgChild(node, 3);
1690
1691     ArrayMode arrayMode = node->arrayMode();
1692     
1693     GPRReg baseReg = base.gpr();
1694     GPRReg propertyReg = property.gpr();
1695     
1696     SpeculateDoubleOperand value(this, child3);
1697
1698     FPRReg valueReg = value.fpr();
1699     
1700     DFG_TYPE_CHECK(
1701         JSValueRegs(), child3, SpecFullRealNumber,
1702         m_jit.branchDouble(
1703             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1704     
1705     if (!m_compileOkay)
1706         return;
1707     
1708     StorageOperand storage(this, child4);
1709     GPRReg storageReg = storage.gpr();
1710
1711     if (node->op() == PutByValAlias) {
1712         // Store the value to the array.
1713         GPRReg propertyReg = property.gpr();
1714         FPRReg valueReg = value.fpr();
1715         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1716         
1717         noResult(m_currentNode);
1718         return;
1719     }
1720     
1721     GPRTemporary temporary;
1722     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1723
1724     MacroAssembler::Jump slowCase;
1725     
1726     if (arrayMode.isInBounds()) {
1727         speculationCheck(
1728             OutOfBounds, JSValueRegs(), 0,
1729             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1730     } else {
1731         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1732         
1733         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1734         
1735         if (!arrayMode.isOutOfBounds())
1736             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1737         
1738         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1739         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1740         
1741         inBounds.link(&m_jit);
1742     }
1743     
1744     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1745
1746     base.use();
1747     property.use();
1748     value.use();
1749     storage.use();
1750     
1751     if (arrayMode.isOutOfBounds()) {
1752         addSlowPathGenerator(
1753             slowPathCall(
1754                 slowCase, this,
1755                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1756                 NoResult, baseReg, propertyReg, valueReg));
1757     }
1758
1759     noResult(m_currentNode, UseChildrenCalledExplicitly);
1760 }
1761
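// Load the character code at the given index of a string: bounds check against the
// string's length, then load an 8-bit or 16-bit character depending on whether the
// underlying StringImpl is 8-bit.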
1762 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1763 {
1764     SpeculateCellOperand string(this, node->child1());
1765     SpeculateStrictInt32Operand index(this, node->child2());
1766     StorageOperand storage(this, node->child3());
1767
1768     GPRReg stringReg = string.gpr();
1769     GPRReg indexReg = index.gpr();
1770     GPRReg storageReg = storage.gpr();
1771     
1772     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1773
1774     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1775     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1776
1777     GPRTemporary scratch(this);
1778     GPRReg scratchReg = scratch.gpr();
1779
1780     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1781
1782     // Load the character into scratchReg
1783     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1784
1785     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1786     JITCompiler::Jump cont8Bit = m_jit.jump();
1787
1788     is16Bit.link(&m_jit);
1789
1790     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1791
1792     cont8Bit.link(&m_jit);
1793
1794     int32Result(scratchReg, m_currentNode);
1795 }
1796
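// GetByVal on a string: in-bounds accesses load the character and map it through the
// single-character string cache (falling back to a call for characters >= 0x100).
// Out-of-bounds accesses either rely on the string/object prototype watchpoints to
// return undefined cheaply, or else call a generic slow path.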
1797 void SpeculativeJIT::compileGetByValOnString(Node* node)
1798 {
1799     SpeculateCellOperand base(this, node->child1());
1800     SpeculateStrictInt32Operand property(this, node->child2());
1801     StorageOperand storage(this, node->child3());
1802     GPRReg baseReg = base.gpr();
1803     GPRReg propertyReg = property.gpr();
1804     GPRReg storageReg = storage.gpr();
1805
1806     GPRTemporary scratch(this);
1807     GPRReg scratchReg = scratch.gpr();
1808 #if USE(JSVALUE32_64)
1809     GPRTemporary resultTag;
1810     GPRReg resultTagReg = InvalidGPRReg;
1811     if (node->arrayMode().isOutOfBounds()) {
1812         GPRTemporary realResultTag(this);
1813         resultTag.adopt(realResultTag);
1814         resultTagReg = resultTag.gpr();
1815     }
1816 #endif
1817
1818     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1819
1820     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1821     JITCompiler::Jump outOfBounds = m_jit.branch32(
1822         MacroAssembler::AboveOrEqual, propertyReg,
1823         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1824     if (node->arrayMode().isInBounds())
1825         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1826
1827     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1828
1829     // Load the character into scratchReg
1830     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1831
1832     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1833     JITCompiler::Jump cont8Bit = m_jit.jump();
1834
1835     is16Bit.link(&m_jit);
1836
1837     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1838
1839     JITCompiler::Jump bigCharacter =
1840         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1841
1842     // 8-bit string values don't need the isASCII check.
1843     cont8Bit.link(&m_jit);
1844
1845     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1846     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1847     m_jit.loadPtr(scratchReg, scratchReg);
1848
1849     addSlowPathGenerator(
1850         slowPathCall(
1851             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1852
1853     if (node->arrayMode().isOutOfBounds()) {
1854 #if USE(JSVALUE32_64)
1855         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1856 #endif
1857
1858         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1859         if (globalObject->stringPrototypeChainIsSane()) {
1860             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1861             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1862             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1863             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1864             // indexed properties either.
1865             // https://bugs.webkit.org/show_bug.cgi?id=144668
1866             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1867             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1868             
1869 #if USE(JSVALUE64)
1870             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1871                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1872 #else
1873             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1874                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1875                 baseReg, propertyReg));
1876 #endif
1877         } else {
1878 #if USE(JSVALUE64)
1879             addSlowPathGenerator(
1880                 slowPathCall(
1881                     outOfBounds, this, operationGetByValStringInt,
1882                     scratchReg, baseReg, propertyReg));
1883 #else
1884             addSlowPathGenerator(
1885                 slowPathCall(
1886                     outOfBounds, this, operationGetByValStringInt,
1887                     resultTagReg, scratchReg, baseReg, propertyReg));
1888 #endif
1889         }
1890         
1891 #if USE(JSVALUE64)
1892         jsValueResult(scratchReg, m_currentNode);
1893 #else
1894         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1895 #endif
1896     } else
1897         cellResult(scratchReg, m_currentNode);
1898 }
1899
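// FromCharCode: convert a character code to a single-character string. Untyped
// operands go straight to a call; otherwise we consult the single-character string
// cache and fall back to a slow path call for codes above 0xff or cache misses.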
1900 void SpeculativeJIT::compileFromCharCode(Node* node)
1901 {
1902     Edge& child = node->child1();
1903     if (child.useKind() == UntypedUse) {
1904         JSValueOperand opr(this, child);
1905         JSValueRegs oprRegs = opr.jsValueRegs();
1906 #if USE(JSVALUE64)
1907         GPRTemporary result(this);
1908         JSValueRegs resultRegs = JSValueRegs(result.gpr());
1909 #else
1910         GPRTemporary resultTag(this);
1911         GPRTemporary resultPayload(this);
1912         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
1913 #endif
1914         flushRegisters();
1915         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
1916         m_jit.exceptionCheck();
1917         
1918         jsValueResult(resultRegs, node);
1919         return;
1920     }
1921
1922     SpeculateStrictInt32Operand property(this, child);
1923     GPRReg propertyReg = property.gpr();
1924     GPRTemporary smallStrings(this);
1925     GPRTemporary scratch(this);
1926     GPRReg scratchReg = scratch.gpr();
1927     GPRReg smallStringsReg = smallStrings.gpr();
1928
1929     JITCompiler::JumpList slowCases;
1930     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1931     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1932     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1933
1934     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1935     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1936     cellResult(scratchReg, m_currentNode);
1937 }
1938
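// Decide how the ToInt32 conversion below should consume its operand, based on the
// data format the operand has currently been generated in.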
1939 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1940 {
1941     VirtualRegister virtualRegister = node->virtualRegister();
1942     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1943
1944     switch (info.registerFormat()) {
1945     case DataFormatStorage:
1946         RELEASE_ASSERT_NOT_REACHED();
1947
1948     case DataFormatBoolean:
1949     case DataFormatCell:
1950         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1951         return GeneratedOperandTypeUnknown;
1952
1953     case DataFormatNone:
1954     case DataFormatJSCell:
1955     case DataFormatJS:
1956     case DataFormatJSBoolean:
1957     case DataFormatJSDouble:
1958         return GeneratedOperandJSValue;
1959
1960     case DataFormatJSInt32:
1961     case DataFormatInt32:
1962         return GeneratedOperandInteger;
1963
1964     default:
1965         RELEASE_ASSERT_NOT_REACHED();
1966         return GeneratedOperandTypeUnknown;
1967     }
1968 }
1969
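// ValueToInt32: convert the operand to an int32. Int52 values are truncated to their
// low 32 bits, doubles go through a truncation fast path with a call to toInt32 as
// the slow case, and boxed numbers (or arbitrary non-cells) are unboxed first.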
1970 void SpeculativeJIT::compileValueToInt32(Node* node)
1971 {
1972     switch (node->child1().useKind()) {
1973 #if USE(JSVALUE64)
1974     case Int52RepUse: {
1975         SpeculateStrictInt52Operand op1(this, node->child1());
1976         GPRTemporary result(this, Reuse, op1);
1977         GPRReg op1GPR = op1.gpr();
1978         GPRReg resultGPR = result.gpr();
1979         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1980         int32Result(resultGPR, node, DataFormatInt32);
1981         return;
1982     }
1983 #endif // USE(JSVALUE64)
1984         
1985     case DoubleRepUse: {
1986         GPRTemporary result(this);
1987         SpeculateDoubleOperand op1(this, node->child1());
1988         FPRReg fpr = op1.fpr();
1989         GPRReg gpr = result.gpr();
1990         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1991         
1992         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1993         
1994         int32Result(gpr, node);
1995         return;
1996     }
1997     
1998     case NumberUse:
1999     case NotCellUse: {
2000         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2001         case GeneratedOperandInteger: {
2002             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2003             GPRTemporary result(this, Reuse, op1);
2004             m_jit.move(op1.gpr(), result.gpr());
2005             int32Result(result.gpr(), node, op1.format());
2006             return;
2007         }
2008         case GeneratedOperandJSValue: {
2009             GPRTemporary result(this);
2010 #if USE(JSVALUE64)
2011             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2012
2013             GPRReg gpr = op1.gpr();
2014             GPRReg resultGpr = result.gpr();
2015             FPRTemporary tempFpr(this);
2016             FPRReg fpr = tempFpr.fpr();
2017
2018             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2019             JITCompiler::JumpList converted;
2020
2021             if (node->child1().useKind() == NumberUse) {
2022                 DFG_TYPE_CHECK(
2023                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2024                     m_jit.branchTest64(
2025                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2026             } else {
2027                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2028                 
2029                 DFG_TYPE_CHECK(
2030                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2031                 
2032                 // It's not a cell: so true turns into 1 and all else turns into 0.
2033                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2034                 converted.append(m_jit.jump());
2035                 
2036                 isNumber.link(&m_jit);
2037             }
2038
2039             // First, if we get here we have a double encoded as a JSValue
2040             unboxDouble(gpr, resultGpr, fpr);
2041
2042             silentSpillAllRegisters(resultGpr);
2043             callOperation(toInt32, resultGpr, fpr);
2044             silentFillAllRegisters(resultGpr);
2045
2046             converted.append(m_jit.jump());
2047
2048             isInteger.link(&m_jit);
2049             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2050
2051             converted.link(&m_jit);
2052 #else
2053             Node* childNode = node->child1().node();
2054             VirtualRegister virtualRegister = childNode->virtualRegister();
2055             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2056
2057             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2058
2059             GPRReg payloadGPR = op1.payloadGPR();
2060             GPRReg resultGpr = result.gpr();
2061         
2062             JITCompiler::JumpList converted;
2063
2064             if (info.registerFormat() == DataFormatJSInt32)
2065                 m_jit.move(payloadGPR, resultGpr);
2066             else {
2067                 GPRReg tagGPR = op1.tagGPR();
2068                 FPRTemporary tempFpr(this);
2069                 FPRReg fpr = tempFpr.fpr();
2070                 FPRTemporary scratch(this);
2071
2072                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2073
2074                 if (node->child1().useKind() == NumberUse) {
2075                     DFG_TYPE_CHECK(
2076                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2077                         m_jit.branch32(
2078                             MacroAssembler::AboveOrEqual, tagGPR,
2079                             TrustedImm32(JSValue::LowestTag)));
2080                 } else {
2081                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2082                     
2083                     DFG_TYPE_CHECK(
2084                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2085                         m_jit.branchIfCell(op1.jsValueRegs()));
2086                     
2087                     // It's not a cell: so true turns into 1 and all else turns into 0.
2088                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2089                     m_jit.move(TrustedImm32(0), resultGpr);
2090                     converted.append(m_jit.jump());
2091                     
2092                     isBoolean.link(&m_jit);
2093                     m_jit.move(payloadGPR, resultGpr);
2094                     converted.append(m_jit.jump());
2095                     
2096                     isNumber.link(&m_jit);
2097                 }
2098
2099                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2100
2101                 silentSpillAllRegisters(resultGpr);
2102                 callOperation(toInt32, resultGpr, fpr);
2103                 silentFillAllRegisters(resultGpr);
2104
2105                 converted.append(m_jit.jump());
2106
2107                 isInteger.link(&m_jit);
2108                 m_jit.move(payloadGPR, resultGpr);
2109
2110                 converted.link(&m_jit);
2111             }
2112 #endif
2113             int32Result(resultGpr, node);
2114             return;
2115         }
2116         case GeneratedOperandTypeUnknown:
2117             RELEASE_ASSERT(!m_compileOkay);
2118             return;
2119         }
2120         RELEASE_ASSERT_NOT_REACHED();
2121         return;
2122     }
2123     
2124     default:
2125         ASSERT(!m_compileOkay);
2126         return;
2127     }
2128 }
2129
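// UInt32ToNumber: reinterpret an int32 as unsigned. If the result may not fit in an
// int32 we always produce a double (adding 2^32 to negative inputs); otherwise we
// speculate that the value fits in int32 and OSR exit if it is negative.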
2130 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2131 {
2132     if (doesOverflow(node->arithMode())) {
2133         // We know that this sometimes produces doubles. So produce a double every
2134         // time. This at least allows subsequent code to not have weird conditionals.
2135             
2136         SpeculateInt32Operand op1(this, node->child1());
2137         FPRTemporary result(this);
2138             
2139         GPRReg inputGPR = op1.gpr();
2140         FPRReg outputFPR = result.fpr();
2141             
2142         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2143             
2144         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2145         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2146         positive.link(&m_jit);
2147             
2148         doubleResult(outputFPR, node);
2149         return;
2150     }
2151     
2152     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2153
2154     SpeculateInt32Operand op1(this, node->child1());
2155     GPRTemporary result(this);
2156
2157     m_jit.move(op1.gpr(), result.gpr());
2158
2159     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2160
2161     int32Result(result.gpr(), node, op1.format());
2162 }
2163
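// DoubleAsInt32: convert a double to an int32, OSR exiting if the conversion is lossy
// (and, when the arith mode requires it, if the result would be negative zero).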
2164 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2165 {
2166     SpeculateDoubleOperand op1(this, node->child1());
2167     FPRTemporary scratch(this);
2168     GPRTemporary result(this);
2169     
2170     FPRReg valueFPR = op1.fpr();
2171     FPRReg scratchFPR = scratch.fpr();
2172     GPRReg resultGPR = result.gpr();
2173
2174     JITCompiler::JumpList failureCases;
2175     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2176     m_jit.branchConvertDoubleToInt32(
2177         valueFPR, resultGPR, failureCases, scratchFPR,
2178         shouldCheckNegativeZero(node->arithMode()));
2179     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2180
2181     int32Result(resultGPR, node);
2182 }
2183
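// DoubleRep: produce an unboxed double from the operand. Boxed numbers are unboxed
// (with type checks as needed); for NotCellUse, undefined becomes NaN, null and false
// become 0, and true becomes 1; Int52 values are converted directly.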
2184 void SpeculativeJIT::compileDoubleRep(Node* node)
2185 {
2186     switch (node->child1().useKind()) {
2187     case RealNumberUse: {
2188         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2189         FPRTemporary result(this);
2190         
2191         JSValueRegs op1Regs = op1.jsValueRegs();
2192         FPRReg resultFPR = result.fpr();
2193         
2194 #if USE(JSVALUE64)
2195         GPRTemporary temp(this);
2196         GPRReg tempGPR = temp.gpr();
2197         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2198 #else
2199         FPRTemporary temp(this);
2200         FPRReg tempFPR = temp.fpr();
2201         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2202 #endif
2203         
2204         JITCompiler::Jump done = m_jit.branchDouble(
2205             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2206         
2207         DFG_TYPE_CHECK(
2208             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2209         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2210         
2211         done.link(&m_jit);
2212         
2213         doubleResult(resultFPR, node);
2214         return;
2215     }
2216     
2217     case NotCellUse:
2218     case NumberUse: {
2219         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2220
2221         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2222         if (isInt32Speculation(possibleTypes)) {
2223             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2224             FPRTemporary result(this);
2225             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2226             doubleResult(result.fpr(), node);
2227             return;
2228         }
2229
2230         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2231         FPRTemporary result(this);
2232
2233 #if USE(JSVALUE64)
2234         GPRTemporary temp(this);
2235
2236         GPRReg op1GPR = op1.gpr();
2237         GPRReg tempGPR = temp.gpr();
2238         FPRReg resultFPR = result.fpr();
2239         JITCompiler::JumpList done;
2240
2241         JITCompiler::Jump isInteger = m_jit.branch64(
2242             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2243
2244         if (node->child1().useKind() == NotCellUse) {
2245             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2246             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2247
2248             static const double zero = 0;
2249             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2250
2251             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2252             done.append(isNull);
2253
2254             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2255                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2256
2257             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2258             static const double one = 1;
2259             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2260             done.append(m_jit.jump());
2261             done.append(isFalse);
2262
2263             isUndefined.link(&m_jit);
2264             static const double NaN = PNaN;
2265             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2266             done.append(m_jit.jump());
2267
2268             isNumber.link(&m_jit);
2269         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2270             typeCheck(
2271                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2272                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2273         }
2274
2275         unboxDouble(op1GPR, tempGPR, resultFPR);
2276         done.append(m_jit.jump());
2277     
2278         isInteger.link(&m_jit);
2279         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2280         done.link(&m_jit);
2281 #else // USE(JSVALUE64) -> this is the 32_64 case
2282         FPRTemporary temp(this);
2283     
2284         GPRReg op1TagGPR = op1.tagGPR();
2285         GPRReg op1PayloadGPR = op1.payloadGPR();
2286         FPRReg tempFPR = temp.fpr();
2287         FPRReg resultFPR = result.fpr();
2288         JITCompiler::JumpList done;
2289     
2290         JITCompiler::Jump isInteger = m_jit.branch32(
2291             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2292
2293         if (node->child1().useKind() == NotCellUse) {
2294             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2295             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2296
2297             static const double zero = 0;
2298             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2299
2300             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2301             done.append(isNull);
2302
2303             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2304
2305             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2306             static const double one = 1;
2307             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2308             done.append(m_jit.jump());
2309             done.append(isFalse);
2310
2311             isUndefined.link(&m_jit);
2312             static const double NaN = PNaN;
2313             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2314             done.append(m_jit.jump());
2315
2316             isNumber.link(&m_jit);
2317         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2318             typeCheck(
2319                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2320                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2321         }
2322
2323         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2324         done.append(m_jit.jump());
2325     
2326         isInteger.link(&m_jit);
2327         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2328         done.link(&m_jit);
2329 #endif // USE(JSVALUE64)
2330     
2331         doubleResult(resultFPR, node);
2332         return;
2333     }
2334         
2335 #if USE(JSVALUE64)
2336     case Int52RepUse: {
2337         SpeculateStrictInt52Operand value(this, node->child1());
2338         FPRTemporary result(this);
2339         
2340         GPRReg valueGPR = value.gpr();
2341         FPRReg resultFPR = result.fpr();
2342
2343         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2344         
2345         doubleResult(resultFPR, node);
2346         return;
2347     }
2348 #endif // USE(JSVALUE64)
2349         
2350     default:
2351         RELEASE_ASSERT_NOT_REACHED();
2352         return;
2353     }
2354 }
2355
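// ValueRep: box the operand back into a JSValue. A double that might be an impure NaN
// is purified first, and Int52 values are boxed via boxInt52.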
2356 void SpeculativeJIT::compileValueRep(Node* node)
2357 {
2358     switch (node->child1().useKind()) {
2359     case DoubleRepUse: {
2360         SpeculateDoubleOperand value(this, node->child1());
2361         JSValueRegsTemporary result(this);
2362         
2363         FPRReg valueFPR = value.fpr();
2364         JSValueRegs resultRegs = result.regs();
2365         
2366         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2367         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2368         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2369         // local was purified.
2370         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2371             m_jit.purifyNaN(valueFPR);
2372
2373         boxDouble(valueFPR, resultRegs);
2374         
2375         jsValueResult(resultRegs, node);
2376         return;
2377     }
2378         
2379 #if USE(JSVALUE64)
2380     case Int52RepUse: {
2381         SpeculateStrictInt52Operand value(this, node->child1());
2382         GPRTemporary result(this);
2383         
2384         GPRReg valueGPR = value.gpr();
2385         GPRReg resultGPR = result.gpr();
2386         
2387         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2388         
2389         jsValueResult(resultGPR, node);
2390         return;
2391     }
2392 #endif // USE(JSVALUE64)
2393         
2394     default:
2395         RELEASE_ASSERT_NOT_REACHED();
2396         return;
2397     }
2398 }
2399
2400 static double clampDoubleToByte(double d)
2401 {
2402     d += 0.5;
2403     if (!(d > 0))
2404         d = 0;
2405     else if (d > 255)
2406         d = 255;
2407     return d;
2408 }
2409
2410 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2411 {
2412     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2413     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2414     jit.xorPtr(result, result);
2415     MacroAssembler::Jump clamped = jit.jump();
2416     tooBig.link(&jit);
2417     jit.move(JITCompiler::TrustedImm32(255), result);
2418     clamped.link(&jit);
2419     inBounds.link(&jit);
2420 }
2421
2422 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2423 {
2424     // Unordered compare so we pick up NaN
2425     static const double zero = 0;
2426     static const double byteMax = 255;
2427     static const double half = 0.5;
2428     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2429     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2430     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2431     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2432     
2433     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2434     // FIXME: This should probably just use a floating point round!
2435     // https://bugs.webkit.org/show_bug.cgi?id=72054
2436     jit.addDouble(source, scratch);
2437     jit.truncateDoubleToInt32(scratch, result);   
2438     MacroAssembler::Jump truncatedInt = jit.jump();
2439     
2440     tooSmall.link(&jit);
2441     jit.xorPtr(result, result);
2442     MacroAssembler::Jump zeroed = jit.jump();
2443     
2444     tooBig.link(&jit);
2445     jit.move(JITCompiler::TrustedImm32(255), result);
2446     
2447     truncatedInt.link(&jit);
2448     zeroed.link(&jit);
2449
2450 }
2451
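// Emit the bounds check for a typed array access, or return an unset jump if the
// access is statically known to be in bounds (PutByValAlias, or a foldable view with
// a constant index that is within the length).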
2452 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2453 {
2454     if (node->op() == PutByValAlias)
2455         return JITCompiler::Jump();
2456     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2457         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2458     if (view) {
2459         uint32_t length = view->length();
2460         Node* indexNode = m_jit.graph().child(node, 1).node();
2461         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2462             return JITCompiler::Jump();
2463         return m_jit.branch32(
2464             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2465     }
2466     return m_jit.branch32(
2467         MacroAssembler::AboveOrEqual, indexGPR,
2468         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2469 }
2470
2471 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2472 {
2473     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2474     if (!jump.isSet())
2475         return;
2476     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2477 }
2478
2479 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2480 {
2481     ASSERT(isInt(type));
2482     
2483     SpeculateCellOperand base(this, node->child1());
2484     SpeculateStrictInt32Operand property(this, node->child2());
2485     StorageOperand storage(this, node->child3());
2486
2487     GPRReg baseReg = base.gpr();
2488     GPRReg propertyReg = property.gpr();
2489     GPRReg storageReg = storage.gpr();
2490
2491     GPRTemporary result(this);
2492     GPRReg resultReg = result.gpr();
2493
2494     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2495
2496     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2497     switch (elementSize(type)) {
2498     case 1:
2499         if (isSigned(type))
2500             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2501         else
2502             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2503         break;
2504     case 2:
2505         if (isSigned(type))
2506             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2507         else
2508             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2509         break;
2510     case 4:
2511         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2512         break;
2513     default:
2514         CRASH();
2515     }
2516     if (elementSize(type) < 4 || isSigned(type)) {
2517         int32Result(resultReg, node);
2518         return;
2519     }
2520     
2521     ASSERT(elementSize(type) == 4 && !isSigned(type));
2522     if (node->shouldSpeculateInt32()) {
2523         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2524         int32Result(resultReg, node);
2525         return;
2526     }
2527     
2528 #if USE(JSVALUE64)
2529     if (node->shouldSpeculateMachineInt()) {
2530         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2531         strictInt52Result(resultReg, node);
2532         return;
2533     }
2534 #endif
2535     
2536     FPRTemporary fresult(this);
2537     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2538     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2539     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2540     positive.link(&m_jit);
2541     doubleResult(fresult.fpr(), node);
2542 }
2543
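// PutByVal on an integer typed array: coerce the value to an int32 (clamping to the
// byte range for clamped types), bounds check, and store with the element width.
// Out-of-bounds stores are silently ignored unless the array mode demands an OSR exit.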
2544 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2545 {
2546     ASSERT(isInt(type));
2547     
2548     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2549     GPRReg storageReg = storage.gpr();
2550     
2551     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2552     
2553     GPRTemporary value;
2554     GPRReg valueGPR = InvalidGPRReg;
2555     
2556     if (valueUse->isConstant()) {
2557         JSValue jsValue = valueUse->asJSValue();
2558         if (!jsValue.isNumber()) {
2559             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2560             noResult(node);
2561             return;
2562         }
2563         double d = jsValue.asNumber();
2564         if (isClamped(type)) {
2565             ASSERT(elementSize(type) == 1);
2566             d = clampDoubleToByte(d);
2567         }
2568         GPRTemporary scratch(this);
2569         GPRReg scratchReg = scratch.gpr();
2570         m_jit.move(Imm32(toInt32(d)), scratchReg);
2571         value.adopt(scratch);
2572         valueGPR = scratchReg;
2573     } else {
2574         switch (valueUse.useKind()) {
2575         case Int32Use: {
2576             SpeculateInt32Operand valueOp(this, valueUse);
2577             GPRTemporary scratch(this);
2578             GPRReg scratchReg = scratch.gpr();
2579             m_jit.move(valueOp.gpr(), scratchReg);
2580             if (isClamped(type)) {
2581                 ASSERT(elementSize(type) == 1);
2582                 compileClampIntegerToByte(m_jit, scratchReg);
2583             }
2584             value.adopt(scratch);
2585             valueGPR = scratchReg;
2586             break;
2587         }
2588             
2589 #if USE(JSVALUE64)
2590         case Int52RepUse: {
2591             SpeculateStrictInt52Operand valueOp(this, valueUse);
2592             GPRTemporary scratch(this);
2593             GPRReg scratchReg = scratch.gpr();
2594             m_jit.move(valueOp.gpr(), scratchReg);
2595             if (isClamped(type)) {
2596                 ASSERT(elementSize(type) == 1);
2597                 MacroAssembler::Jump inBounds = m_jit.branch64(
2598                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2599                 MacroAssembler::Jump tooBig = m_jit.branch64(
2600                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2601                 m_jit.move(TrustedImm32(0), scratchReg);
2602                 MacroAssembler::Jump clamped = m_jit.jump();
2603                 tooBig.link(&m_jit);
2604                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2605                 clamped.link(&m_jit);
2606                 inBounds.link(&m_jit);
2607             }
2608             value.adopt(scratch);
2609             valueGPR = scratchReg;
2610             break;
2611         }
2612 #endif // USE(JSVALUE64)
2613             
2614         case DoubleRepUse: {
2615             if (isClamped(type)) {
2616                 ASSERT(elementSize(type) == 1);
2617                 SpeculateDoubleOperand valueOp(this, valueUse);
2618                 GPRTemporary result(this);
2619                 FPRTemporary floatScratch(this);
2620                 FPRReg fpr = valueOp.fpr();
2621                 GPRReg gpr = result.gpr();
2622                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2623                 value.adopt(result);
2624                 valueGPR = gpr;
2625             } else {
2626                 SpeculateDoubleOperand valueOp(this, valueUse);
2627                 GPRTemporary result(this);
2628                 FPRReg fpr = valueOp.fpr();
2629                 GPRReg gpr = result.gpr();
2630                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2631                 m_jit.xorPtr(gpr, gpr);
2632                 MacroAssembler::Jump fixed = m_jit.jump();
2633                 notNaN.link(&m_jit);
2634                 
2635                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2636                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2637                 
2638                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2639                 
2640                 fixed.link(&m_jit);
2641                 value.adopt(result);
2642                 valueGPR = gpr;
2643             }
2644             break;
2645         }
2646             
2647         default:
2648             RELEASE_ASSERT_NOT_REACHED();
2649             break;
2650         }
2651     }
2652     
2653     ASSERT_UNUSED(valueGPR, valueGPR != property);
2654     ASSERT(valueGPR != base);
2655     ASSERT(valueGPR != storageReg);
2656     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2657     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2658         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2659         outOfBounds = MacroAssembler::Jump();
2660     }
2661
2662     switch (elementSize(type)) {
2663     case 1:
2664         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2665         break;
2666     case 2:
2667         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2668         break;
2669     case 4:
2670         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2671         break;
2672     default:
2673         CRASH();
2674     }
2675     if (outOfBounds.isSet())
2676         outOfBounds.link(&m_jit);
2677     noResult(node);
2678 }
2679
2680 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2681 {
2682     ASSERT(isFloat(type));
2683     
2684     SpeculateCellOperand base(this, node->child1());
2685     SpeculateStrictInt32Operand property(this, node->child2());
2686     StorageOperand storage(this, node->child3());
2687
2688     GPRReg baseReg = base.gpr();
2689     GPRReg propertyReg = property.gpr();
2690     GPRReg storageReg = storage.gpr();
2691
2692     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2693
2694     FPRTemporary result(this);
2695     FPRReg resultReg = result.fpr();
2696     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2697     switch (elementSize(type)) {
2698     case 4:
2699         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2700         m_jit.convertFloatToDouble(resultReg, resultReg);
2701         break;
2702     case 8: {
2703         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2704         break;
2705     }
2706     default:
2707         RELEASE_ASSERT_NOT_REACHED();
2708     }
2709     
2710     doubleResult(resultReg, node);
2711 }
2712
2713 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2714 {
2715     ASSERT(isFloat(type));
2716     
2717     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2718     GPRReg storageReg = storage.gpr();
2719     
2720     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2721     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2722
2723     SpeculateDoubleOperand valueOp(this, valueUse);
2724     FPRTemporary scratch(this);
2725     FPRReg valueFPR = valueOp.fpr();
2726     FPRReg scratchFPR = scratch.fpr();
2727
2728     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2729     
2730     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2731     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2732         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2733         outOfBounds = MacroAssembler::Jump();
2734     }
2735     
2736     switch (elementSize(type)) {
2737     case 4: {
2738         m_jit.moveDouble(valueFPR, scratchFPR);
2739         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2740         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2741         break;
2742     }
2743     case 8:
2744         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2745         break;
2746     default:
2747         RELEASE_ASSERT_NOT_REACHED();
2748     }
2749     if (outOfBounds.isSet())
2750         outOfBounds.link(&m_jit);
2751     noResult(node);
2752 }
2753
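// The prototype chain walk for instanceof: repeatedly load each object's prototype
// from its structure until we either hit the prototype operand (true), fall off the
// end of the chain (false), or encounter a proxy, in which case we call
// operationDefaultHasInstance.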
2754 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2755 {
2756     // Check that prototype is an object.
2757     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2758     
2759     // Initialize scratchReg with the value being checked.
2760     m_jit.move(valueReg, scratchReg);
2761     
2762     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2763     MacroAssembler::Label loop(&m_jit);
2764     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2765         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2766     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2767     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2768     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2769 #if USE(JSVALUE64)
2770     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2771 #else
2772     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2773 #endif
2774     
2775     // No match - result is false.
2776 #if USE(JSVALUE64)
2777     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2778 #else
2779     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2780 #endif
2781     MacroAssembler::JumpList doneJumps; 
2782     doneJumps.append(m_jit.jump());
2783
2784     performDefaultHasInstance.link(&m_jit);
2785     silentSpillAllRegisters(scratchReg);
2786     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2787     silentFillAllRegisters(scratchReg);
2788     m_jit.exceptionCheck();
2789 #if USE(JSVALUE64)
2790     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2791 #endif
2792     doneJumps.append(m_jit.jump());
2793     
2794     isInstance.link(&m_jit);
2795 #if USE(JSVALUE64)
2796     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2797 #else
2798     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2799 #endif
2800     
2801     doneJumps.link(&m_jit);
2802 }
2803
2804 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2805 {
2806     SpeculateCellOperand base(this, node->child1());
2807
2808     GPRReg baseGPR = base.gpr();
2809
2810     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2811
2812     noResult(node);
2813 }
2814
2815 void SpeculativeJIT::compileInstanceOf(Node* node)
2816 {
2817     if (node->child1().useKind() == UntypedUse) {
2818         // It might not be a cell. Speculate less aggressively.
2819         // Or: it might only be used once (i.e. by us), so we get zero benefit
2820         // from speculating any more aggressively than we absolutely need to.
2821         
2822         JSValueOperand value(this, node->child1());
2823         SpeculateCellOperand prototype(this, node->child2());
2824         GPRTemporary scratch(this);
2825         GPRTemporary scratch2(this);
2826         
2827         GPRReg prototypeReg = prototype.gpr();
2828         GPRReg scratchReg = scratch.gpr();
2829         GPRReg scratch2Reg = scratch2.gpr();
2830         
2831         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2832         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2833         moveFalseTo(scratchReg);
2834
2835         MacroAssembler::Jump done = m_jit.jump();
2836         
2837         isCell.link(&m_jit);
2838         
2839         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2840         
2841         done.link(&m_jit);
2842
2843         blessedBooleanResult(scratchReg, node);
2844         return;
2845     }
2846     
2847     SpeculateCellOperand value(this, node->child1());
2848     SpeculateCellOperand prototype(this, node->child2());
2849     
2850     GPRTemporary scratch(this);
2851     GPRTemporary scratch2(this);
2852     
2853     GPRReg valueReg = value.gpr();
2854     GPRReg prototypeReg = prototype.gpr();
2855     GPRReg scratchReg = scratch.gpr();
2856     GPRReg scratch2Reg = scratch2.gpr();
2857     
2858     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2859
2860     blessedBooleanResult(scratchReg, node);
2861 }
2862
2863 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2864 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2865 {
2866     Edge& leftChild = node->child1();
2867     Edge& rightChild = node->child2();
2868
2869     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2870         JSValueOperand left(this, leftChild);
2871         JSValueOperand right(this, rightChild);
2872         JSValueRegs leftRegs = left.jsValueRegs();
2873         JSValueRegs rightRegs = right.jsValueRegs();
2874 #if USE(JSVALUE64)
2875         GPRTemporary result(this);
2876         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2877 #else
2878         GPRTemporary resultTag(this);
2879         GPRTemporary resultPayload(this);
2880         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2881 #endif
2882         flushRegisters();
2883         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2884         m_jit.exceptionCheck();
2885
2886         jsValueResult(resultRegs, node);
2887         return;
2888     }
2889
2890     Optional<JSValueOperand> left;
2891     Optional<JSValueOperand> right;
2892
2893     JSValueRegs leftRegs;
2894     JSValueRegs rightRegs;
2895
2896 #if USE(JSVALUE64)
2897     GPRTemporary result(this);
2898     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2899     GPRTemporary scratch(this);
2900     GPRReg scratchGPR = scratch.gpr();
2901 #else
2902     GPRTemporary resultTag(this);
2903     GPRTemporary resultPayload(this);
2904     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2905     GPRReg scratchGPR = resultTag.gpr();
2906 #endif
2907
2908     SnippetOperand leftOperand;
2909     SnippetOperand rightOperand;
2910
2911     // The snippet generator does not support both operands being constant. If the left
2912     // operand is already const, we'll ignore the right operand's constness.
2913     if (leftChild->isInt32Constant())
2914         leftOperand.setConstInt32(leftChild->asInt32());
2915     else if (rightChild->isInt32Constant())
2916         rightOperand.setConstInt32(rightChild->asInt32());
2917
2918     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2919
2920     if (!leftOperand.isConst()) {
2921         left = JSValueOperand(this, leftChild);
2922         leftRegs = left->jsValueRegs();
2923     }
2924     if (!rightOperand.isConst()) {
2925         right = JSValueOperand(this, rightChild);
2926         rightRegs = right->jsValueRegs();
2927     }
2928
2929     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
2930     gen.generateFastPath(m_jit);
2931
2932     ASSERT(gen.didEmitFastPath());
2933     gen.endJumpList().append(m_jit.jump());
2934
2935     gen.slowPathJumpList().link(&m_jit);
2936     silentSpillAllRegisters(resultRegs);
2937
2938     if (leftOperand.isConst()) {
2939         leftRegs = resultRegs;
2940         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
2941     } else if (rightOperand.isConst()) {
2942         rightRegs = resultRegs;
2943         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
2944     }
2945
2946     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2947
2948     silentFillAllRegisters(resultRegs);
2949     m_jit.exceptionCheck();
2950
2951     gen.endJumpList().link(&m_jit);
2952     jsValueResult(resultRegs, node);
2953 }
2954
2955 void SpeculativeJIT::compileBitwiseOp(Node* node)
2956 {
2957     NodeType op = node->op();
2958     Edge& leftChild = node->child1();
2959     Edge& rightChild = node->child2();
2960
2961     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
2962         switch (op) {
2963         case BitAnd:
2964             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
2965             return;
2966         case BitOr:
2967             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
2968             return;
2969         case BitXor:
2970             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
2971             return;
2972         default:
2973             RELEASE_ASSERT_NOT_REACHED();
2974         }
2975     }
2976
2977     if (leftChild->isInt32Constant()) {
2978         SpeculateInt32Operand op2(this, rightChild);
2979         GPRTemporary result(this, Reuse, op2);
2980
2981         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
2982
2983         int32Result(result.gpr(), node);
2984
2985     } else if (rightChild->isInt32Constant()) {
2986         SpeculateInt32Operand op1(this, leftChild);
2987         GPRTemporary result(this, Reuse, op1);
2988
2989         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
2990
2991         int32Result(result.gpr(), node);
2992
2993     } else {
2994         SpeculateInt32Operand op1(this, leftChild);
2995         SpeculateInt32Operand op2(this, rightChild);
2996         GPRTemporary result(this, Reuse, op1, op2);
2997         
2998         GPRReg reg1 = op1.gpr();
2999         GPRReg reg2 = op2.gpr();
3000         bitOp(op, reg1, reg2, result.gpr());
3001         
3002         int32Result(result.gpr(), node);
3003     }
3004 }
3005
3006 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3007 {
3008     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3009         ? operationValueBitRShift : operationValueBitURShift;
3010     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3011         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
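         // BitRShift is JavaScript's sign-extending '>>' (e.g. -8 >> 1 === -4), while
         // BitURShift is the zero-filling '>>>' (e.g. -8 >>> 1 === 2147483644).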
3012
3013     Edge& leftChild = node->child1();
3014     Edge& rightChild = node->child2();
3015
3016     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3017         JSValueOperand left(this, leftChild);
3018         JSValueOperand right(this, rightChild);
3019         JSValueRegs leftRegs = left.jsValueRegs();
3020         JSValueRegs rightRegs = right.jsValueRegs();
3021 #if USE(JSVALUE64)
3022         GPRTemporary result(this);
3023         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3024 #else
3025         GPRTemporary resultTag(this);
3026         GPRTemporary resultPayload(this);
3027         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3028 #endif
3029         flushRegisters();
3030         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3031         m_jit.exceptionCheck();
3032
3033         jsValueResult(resultRegs, node);
3034         return;
3035     }
3036
3037     Optional<JSValueOperand> left;
3038     Optional<JSValueOperand> right;
3039
3040     JSValueRegs leftRegs;
3041     JSValueRegs rightRegs;
3042
3043     FPRTemporary leftNumber(this);
3044     FPRReg leftFPR = leftNumber.fpr();
3045
3046 #if USE(JSVALUE64)
3047     GPRTemporary result(this);
3048     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3049     GPRTemporary scratch(this);
3050     GPRReg scratchGPR = scratch.gpr();
3051     FPRReg scratchFPR = InvalidFPRReg;
3052 #else
3053     GPRTemporary resultTag(this);
3054     GPRTemporary resultPayload(this);
3055     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3056     GPRReg scratchGPR = resultTag.gpr();
3057     FPRTemporary fprScratch(this);
3058     FPRReg scratchFPR = fprScratch.fpr();
3059 #endif
3060
3061     SnippetOperand leftOperand;
3062     SnippetOperand rightOperand;
3063
3064     // The snippet generator does not support both operands being constant. If the left
3065     // operand is already const, we'll ignore the right operand's constness.
3066     if (leftChild->isInt32Constant())
3067         leftOperand.setConstInt32(leftChild->asInt32());
3068     else if (rightChild->isInt32Constant())
3069         rightOperand.setConstInt32(rightChild->asInt32());
3070
3071     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3072
3073     if (!leftOperand.isConst()) {
3074         left = JSValueOperand(this, leftChild);
3075         leftRegs = left->jsValueRegs();
3076     }
3077     if (!rightOperand.isConst()) {
3078         right = JSValueOperand(this, rightChild);
3079         rightRegs = right->jsValueRegs();
3080     }
3081
3082     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3083         leftFPR, scratchGPR, scratchFPR, shiftType);
3084     gen.generateFastPath(m_jit);
3085
3086     ASSERT(gen.didEmitFastPath());
3087     gen.endJumpList().append(m_jit.jump());
3088
3089     gen.slowPathJumpList().link(&m_jit);
3090     silentSpillAllRegisters(resultRegs);
3091
3092     if (leftOperand.isConst()) {
3093         leftRegs = resultRegs;
3094         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3095     } else if (rightOperand.isConst()) {
3096         rightRegs = resultRegs;
3097         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3098     }
3099
3100     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3101
3102     silentFillAllRegisters(resultRegs);
3103     m_jit.exceptionCheck();
3104
3105     gen.endJumpList().link(&m_jit);
3106     jsValueResult(resultRegs, node);
3107     return;
3108 }
3109
3110 void SpeculativeJIT::compileShiftOp(Node* node)
3111 {
3112     NodeType op = node->op();
3113     Edge& leftChild = node->child1();
3114     Edge& rightChild = node->child2();
3115
3116     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3117         switch (op) {
3118         case BitLShift:
3119             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3120             return;
3121         case BitRShift:
3122         case BitURShift:
3123             emitUntypedRightShiftBitOp(node);
3124             return;
3125         default:
3126             RELEASE_ASSERT_NOT_REACHED();
3127         }
3128     }
3129
3130     if (rightChild->isInt32Constant()) {
3131         SpeculateInt32Operand op1(this, leftChild);
3132         GPRTemporary result(this, Reuse, op1);
3133
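             // ECMAScript takes the shift count modulo 32 (so x << 33 behaves like
             // x << 1); masking the constant with 0x1f implements that and keeps the
             // immediate in the range the MacroAssembler accepts.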
3134         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3135
3136         int32Result(result.gpr(), node);
3137     } else {
3138         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3139         SpeculateInt32Operand op1(this, leftChild);
3140         SpeculateInt32Operand op2(this, rightChild);
3141         GPRTemporary result(this, Reuse, op1);
3142
3143         GPRReg reg1 = op1.gpr();
3144         GPRReg reg2 = op2.gpr();
3145         shiftOp(op, reg1, reg2, result.gpr());
3146
3147         int32Result(result.gpr(), node);
3148     }
3149 }
3150
3151 void SpeculativeJIT::compileValueAdd(Node* node)
3152 {
3153     Edge& leftChild = node->child1();
3154     Edge& rightChild = node->child2();
3155
3156     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3157         JSValueOperand left(this, leftChild);
3158         JSValueOperand right(this, rightChild);
3159         JSValueRegs leftRegs = left.jsValueRegs();
3160         JSValueRegs rightRegs = right.jsValueRegs();
3161 #if USE(JSVALUE64)
3162         GPRTemporary result(this);
3163         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3164 #else
3165         GPRTemporary resultTag(this);
3166         GPRTemporary resultPayload(this);
3167         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3168 #endif
3169         flushRegisters();
3170         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3171         m_jit.exceptionCheck();
3172     
3173         jsValueResult(resultRegs, node);
3174         return;
3175     }
3176
3177     Optional<JSValueOperand> left;
3178     Optional<JSValueOperand> right;
3179
3180     JSValueRegs leftRegs;
3181     JSValueRegs rightRegs;
3182
3183     FPRTemporary leftNumber(this);
3184     FPRTemporary rightNumber(this);
3185     FPRReg leftFPR = leftNumber.fpr();
3186     FPRReg rightFPR = rightNumber.fpr();
3187
3188 #if USE(JSVALUE64)
3189     GPRTemporary result(this);
3190     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3191     GPRTemporary scratch(this);
3192     GPRReg scratchGPR = scratch.gpr();
3193     FPRReg scratchFPR = InvalidFPRReg;
3194 #else
3195     GPRTemporary resultTag(this);
3196     GPRTemporary resultPayload(this);
3197     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3198     GPRReg scratchGPR = resultTag.gpr();
3199     FPRTemporary fprScratch(this);
3200     FPRReg scratchFPR = fprScratch.fpr();
3201 #endif
3202
3203     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3204     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3205
3206     // The snippet generator does not support both operands being constant. If the left
3207     // operand is already const, we'll ignore the right operand's constness.
3208     if (leftChild->isInt32Constant())
3209         leftOperand.setConstInt32(leftChild->asInt32());
3210     else if (rightChild->isInt32Constant())
3211         rightOperand.setConstInt32(rightChild->asInt32());
3212
3213     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3214
3215     if (!leftOperand.isConst()) {
3216         left = JSValueOperand(this, leftChild);
3217         leftRegs = left->jsValueRegs();
3218     }
3219     if (!rightOperand.isConst()) {
3220         right = JSValueOperand(this, rightChild);
3221         rightRegs = right->jsValueRegs();
3222     }
3223
3224     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3225         leftFPR, rightFPR, scratchGPR, scratchFPR);
3226     gen.generateFastPath(m_jit);
3227
3228     ASSERT(gen.didEmitFastPath());
3229     gen.endJumpList().append(m_jit.jump());
3230
3231     gen.slowPathJumpList().link(&m_jit);
3232
3233     silentSpillAllRegisters(resultRegs);
3234
3235     if (leftOperand.isConst()) {
3236         leftRegs = resultRegs;
3237         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3238     } else if (rightOperand.isConst()) {
3239         rightRegs = resultRegs;
3240         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3241     }
3242
3243     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3244
3245     silentFillAllRegisters(resultRegs);
3246     m_jit.exceptionCheck();
3247
3248     gen.endJumpList().link(&m_jit);
3249     jsValueResult(resultRegs, node);
3250     return;
3251 }
3252
3253 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3254 {
3255     // We could do something smarter here, but this case is currently super rare and,
3256     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
3257
3258     JSValueOperand value(this, node->child1());
3259     SpeculateCellOperand constructor(this, node->child2());
3260     JSValueOperand hasInstanceValue(this, node->child3());
3261     GPRTemporary result(this);
3262
3263     JSValueRegs valueRegs = value.jsValueRegs();
3264     GPRReg constructorGPR = constructor.gpr();
3265     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3266     GPRReg resultGPR = result.gpr();
3267
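         // There is no fast path here: unconditionally jump to the slow path, which
         // calls operationInstanceOfCustom and uses its boolean result.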
3268     MacroAssembler::Jump slowCase = m_jit.jump();
3269
3270     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3271
3272     unblessedBooleanResult(resultGPR, node);
3273 }
3274
3275 void SpeculativeJIT::compileIsJSArray(Node* node)
3276 {
3277     JSValueOperand value(this, node->child1());
3278     GPRFlushedCallResult result(this);
3279
3280     JSValueRegs valueRegs = value.jsValueRegs();
3281     GPRReg resultGPR = result.gpr();
3282
3283     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3284
3285     m_jit.compare8(JITCompiler::Equal,
3286         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3287         TrustedImm32(ArrayType),
3288         resultGPR);
3289     blessBoolean(resultGPR);
3290     JITCompiler::Jump done = m_jit.jump();
3291
3292     isNotCell.link(&m_jit);
3293     moveFalseTo(resultGPR);
3294
3295     done.link(&m_jit);
3296     blessedBooleanResult(resultGPR, node);
3297 }
3298
3299 void SpeculativeJIT::compileIsArrayObject(Node* node)
3300 {
3301     JSValueOperand value(this, node->child1());
3302     GPRFlushedCallResult result(this);
3303
3304     JSValueRegs valueRegs = value.jsValueRegs();
3305     GPRReg resultGPR = result.gpr();
3306
3307     JITCompiler::JumpList done;
3308
3309     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3310
3311     JITCompiler::Jump notJSArray = m_jit.branch8(JITCompiler::NotEqual,
3312         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3313         TrustedImm32(ArrayType));
3314     m_jit.move(TrustedImm32(true), resultGPR);
3315     done.append(m_jit.jump());
3316
3317     notJSArray.link(&m_jit);
3318     silentSpillAllRegisters(resultGPR);
3319     callOperation(operationIsArrayObject, resultGPR, valueRegs);
3320     silentFillAllRegisters(resultGPR);
3321     m_jit.exceptionCheck();
3322     done.append(m_jit.jump());
3323
3324     isNotCell.link(&m_jit);
3325     m_jit.move(TrustedImm32(false), resultGPR);
3326
3327     done.link(&m_jit);
3328     unblessedBooleanResult(resultGPR, node);
3329 }
3330
3331 // FIXME: This function should just get the ClassInfo and check if it's == ArrayConstructor::info(). https://bugs.webkit.org/show_bug.cgi?id=155667
3332 void SpeculativeJIT::compileIsArrayConstructor(Node* node)
3333 {
3334     JSValueOperand value(this, node->child1());
3335     GPRFlushedCallResult result(this);
3336
3337     JSValueRegs valueRegs = value.jsValueRegs();
3338     GPRReg resultGPR = result.gpr();
3339
3340     flushRegisters();
3341     callOperation(operationIsArrayConstructor, resultGPR, valueRegs);
3342     unblessedBooleanResult(resultGPR, node);
3343 }
3344
3345 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3346 {
3347     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3348     JSValueOperand value(this, node->child1());
3349 #if USE(JSVALUE64)
3350     GPRTemporary result(this, Reuse, value);
3351 #else
3352     GPRTemporary result(this, Reuse, value, PayloadWord);
3353 #endif
3354
3355     JSValueRegs valueRegs = value.jsValueRegs();
3356     GPRReg resultGPR = result.gpr();
3357
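         // Object(value) is the identity on objects, so the fast path just forwards the
         // cell. Non-cells and non-object cells take the slow path, which boxes
         // primitives or creates a fresh empty object for null and undefined.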
3358     MacroAssembler::JumpList slowCases;
3359     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3360     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3361     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3362
3363     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3364     cellResult(resultGPR, node);
3365 }
3366
3367 void SpeculativeJIT::compileArithAdd(Node* node)
3368 {
3369     switch (node->binaryUseKind()) {
3370     case Int32Use: {
3371         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3372
3373         if (node->child2()->isInt32Constant()) {
3374             SpeculateInt32Operand op1(this, node->child1());
3375             int32_t imm2 = node->child2()->asInt32();
3376
3377             if (!shouldCheckOverflow(node->arithMode())) {
3378                 GPRTemporary result(this, Reuse, op1);
3379                 m_jit.add32(Imm32(imm2), op1.gpr(), result.gpr());
3380                 int32Result(result.gpr(), node);
3381                 return;
3382             }
3383
3384             GPRTemporary result(this);
3385             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3386
3387             int32Result(result.gpr(), node);
3388             return;
3389         }
3390                 
3391         SpeculateInt32Operand op1(this, node->child1());
3392         SpeculateInt32Operand op2(this, node->child2());
3393         GPRTemporary result(this, Reuse, op1, op2);
3394
3395         GPRReg gpr1 = op1.gpr();
3396         GPRReg gpr2 = op2.gpr();
3397         GPRReg gprResult = result.gpr();
3398
3399         if (!shouldCheckOverflow(node->arithMode()))
3400             m_jit.add32(gpr1, gpr2, gprResult);
3401         else {
3402             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3403                 
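                 // If the result register aliases one of the inputs, the overflowing add
                 // has already clobbered that input. Recording a SpeculationRecovery lets
                 // OSR exit undo the add (by subtracting the surviving operand) and
                 // recover the original value.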
3404             if (gpr1 == gprResult)
3405                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3406             else if (gpr2 == gprResult)
3407                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3408             else
3409                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3410         }
3411
3412         int32Result(gprResult, node);
3413         return;
3414     }
3415         
3416 #if USE(JSVALUE64)
3417     case Int52RepUse: {
3418         ASSERT(shouldCheckOverflow(node->arithMode()));
3419         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3420
3421         // Will we need an overflow check? If we can prove that neither input can be
3422         // Int52 then the overflow check will not be necessary.
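             // (Two operands that are known not to need more than 32 bits sum to at
             // most 33 bits, which is comfortably within int52, so a plain add64
             // suffices.)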
3423         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3424             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3425             SpeculateWhicheverInt52Operand op1(this, node->child1());
3426             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3427             GPRTemporary result(this, Reuse, op1);
3428             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3429             int52Result(result.gpr(), node, op1.format());
3430             return;
3431         }
3432         
3433         SpeculateInt52Operand op1(this, node->child1());
3434         SpeculateInt52Operand op2(this, node->child2());
3435         GPRTemporary result(this);
3436         m_jit.move(op1.gpr(), result.gpr());
3437         speculationCheck(
3438             Int52Overflow, JSValueRegs(), 0,
3439             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3440         int52Result(result.gpr(), node);
3441         return;
3442     }
3443 #endif // USE(JSVALUE64)
3444     
3445     case DoubleRepUse: {
3446         SpeculateDoubleOperand op1(this, node->child1());
3447         SpeculateDoubleOperand op2(this, node->child2());
3448         FPRTemporary result(this, op1, op2);
3449
3450         FPRReg reg1 = op1.fpr();
3451         FPRReg reg2 = op2.fpr();
3452         m_jit.addDouble(reg1, reg2, result.fpr());
3453
3454         doubleResult(result.fpr(), node);
3455         return;
3456     }
3457         
3458     default:
3459         RELEASE_ASSERT_NOT_REACHED();
3460         break;
3461     }
3462 }
3463
3464 void SpeculativeJIT::compileMakeRope(Node* node)
3465 {
3466     ASSERT(node->child1().useKind() == KnownStringUse);
3467     ASSERT(node->child2().useKind() == KnownStringUse);
3468     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3469     
3470     SpeculateCellOperand op1(this, node->child1());
3471     SpeculateCellOperand op2(this, node->child2());
3472     SpeculateCellOperand op3(this, node->child3());
3473     GPRTemporary result(this);
3474     GPRTemporary allocator(this);
3475     GPRTemporary scratch(this);
3476     
3477     GPRReg opGPRs[3];
3478     unsigned numOpGPRs;
3479     opGPRs[0] = op1.gpr();
3480     opGPRs[1] = op2.gpr();
3481     if (node->child3()) {
3482         opGPRs[2] = op3.gpr();
3483         numOpGPRs = 3;
3484     } else {
3485         opGPRs[2] = InvalidGPRReg;
3486         numOpGPRs = 2;
3487     }
3488     GPRReg resultGPR = result.gpr();
3489     GPRReg allocatorGPR = allocator.gpr();
3490     GPRReg scratchGPR = scratch.gpr();
3491     
3492     JITCompiler::JumpList slowPath;
3493     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3494     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3495     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3496         
3497     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3498     for (unsigned i = 0; i < numOpGPRs; ++i)
3499         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3500     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3501         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
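         // Merge the fiber metadata: the rope is 8-bit only if every fiber is 8-bit
         // (hence the running and32 of the flags), and the fiber lengths are summed
         // with an overflow check because a string length must fit in 32 bits.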
3502     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3503     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3504     if (!ASSERT_DISABLED) {
3505         JITCompiler::Jump ok = m_jit.branch32(
3506             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3507         m_jit.abortWithReason(DFGNegativeStringLength);
3508         ok.link(&m_jit);
3509     }
3510     for (unsigned i = 1; i < numOpGPRs; ++i) {
3511         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3512         speculationCheck(
3513             Uncountable, JSValueSource(), nullptr,
3514             m_jit.branchAdd32(
3515                 JITCompiler::Overflow,
3516                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3517     }
3518     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3519     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3520     if (!ASSERT_DISABLED) {
3521         JITCompiler::Jump ok = m_jit.branch32(
3522             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3523         m_jit.abortWithReason(DFGNegativeStringLength);
3524         ok.link(&m_jit);
3525     }
3526     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3527     
3528     switch (numOpGPRs) {
3529     case 2:
3530         addSlowPathGenerator(slowPathCall(
3531             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3532         break;
3533     case 3:
3534         addSlowPathGenerator(slowPathCall(
3535             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3536         break;
3537     default:
3538         RELEASE_ASSERT_NOT_REACHED();
3539         break;
3540     }
3541         
3542     cellResult(resultGPR, node);
3543 }
3544
3545 void SpeculativeJIT::compileArithClz32(Node* node)
3546 {
3547     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3548     SpeculateInt32Operand value(this, node->child1());
3549     GPRTemporary result(this, Reuse, value);
3550     GPRReg valueReg = value.gpr();
3551     GPRReg resultReg = result.gpr();
3552     m_jit.countLeadingZeros32(valueReg, resultReg);
3553     int32Result(resultReg, node);
3554 }
3555
3556 void SpeculativeJIT::compileArithSub(Node* node)
3557 {
3558     switch (node->binaryUseKind()) {
3559     case Int32Use: {
3560         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3561         
3562         if (node->child2()->isInt32Constant()) {
3563             SpeculateInt32Operand op1(this, node->child1());
3564             int32_t imm2 = node->child2()->asInt32();
3565             GPRTemporary result(this);
3566
3567             if (!shouldCheckOverflow(node->arithMode())) {
3568                 m_jit.move(op1.gpr(), result.gpr());
3569                 m_jit.sub32(Imm32(imm2), result.gpr());
3570             } else {
3571                 GPRTemporary scratch(this);
3572                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3573             }
3574
3575             int32Result(result.gpr(), node);
3576             return;
3577         }
3578             
3579         if (node->child1()->isInt32Constant()) {
3580             int32_t imm1 = node->child1()->asInt32();
3581             SpeculateInt32Operand op2(this, node->child2());
3582             GPRTemporary result(this);
3583                 
3584             m_jit.move(Imm32(imm1), result.gpr());
3585             if (!shouldCheckOverflow(node->arithMode()))
3586                 m_jit.sub32(op2.gpr(), result.gpr());
3587             else
3588                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3589                 
3590             int32Result(result.gpr(), node);
3591             return;
3592         }
3593             
3594         SpeculateInt32Operand op1(this, node->child1());
3595         SpeculateInt32Operand op2(this, node->child2());
3596         GPRTemporary result(this);
3597
3598         if (!shouldCheckOverflow(node->arithMode())) {
3599             m_jit.move(op1.gpr(), result.gpr());
3600             m_jit.sub32(op2.gpr(), result.gpr());
3601         } else
3602             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3603
3604         int32Result(result.gpr(), node);
3605         return;
3606     }
3607         
3608 #if USE(JSVALUE64)
3609     case Int52RepUse: {
3610         ASSERT(shouldCheckOverflow(node->arithMode()));
3611         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3612
3613         // Will we need an overflow check? If we can prove that neither input can be
3614         // Int52 then the overflow check will not be necessary.
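             // (The difference of two values that fit in 32 bits needs at most 33 bits,
             // which is comfortably within int52, so a plain sub64 suffices.)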
3615         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3616             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3617             SpeculateWhicheverInt52Operand op1(this, node->child1());
3618             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3619             GPRTemporary result(this, Reuse, op1);
3620             m_jit.move(op1.gpr(), result.gpr());
3621             m_jit.sub64(op2.gpr(), result.gpr());
3622             int52Result(result.gpr(), node, op1.format());
3623             return;
3624         }
3625         
3626         SpeculateInt52Operand op1(this, node->child1());
3627         SpeculateInt52Operand op2(this, node->child2());
3628         GPRTemporary result(this);
3629         m_jit.move(op1.gpr(), result.gpr());
3630         speculationCheck(
3631             Int52Overflow, JSValueRegs(), 0,
3632             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3633         int52Result(result.gpr(), node);
3634         return;
3635     }
3636 #endif // USE(JSVALUE64)
3637
3638     case DoubleRepUse: {
3639         SpeculateDoubleOperand op1(this, node->child1());
3640         SpeculateDoubleOperand op2(this, node->child2());
3641         FPRTemporary result(this, op1);
3642
3643         FPRReg reg1 = op1.fpr();
3644         FPRReg reg2 = op2.fpr();
3645         m_jit.subDouble(reg1, reg2, result.fpr());
3646
3647         doubleResult(result.fpr(), node);
3648         return;
3649     }
3650
3651     case UntypedUse: {
3652         Edge& leftChild = node->child1();
3653         Edge& rightChild = node->child2();
3654
3655         JSValueOperand left(this, leftChild);
3656         JSValueOperand right(this, rightChild);
3657
3658         JSValueRegs leftRegs = left.jsValueRegs();
3659         JSValueRegs rightRegs = right.jsValueRegs();
3660
3661         FPRTemporary leftNumber(this);
3662         FPRTemporary rightNumber(this);
3663         FPRReg leftFPR = leftNumber.fpr();
3664         FPRReg rightFPR = rightNumber.fpr();
3665
3666 #if USE(JSVALUE64)
3667         GPRTemporary result(this);
3668         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3669         GPRTemporary scratch(this);
3670         GPRReg scratchGPR = scratch.gpr();
3671         FPRReg scratchFPR = InvalidFPRReg;
3672 #else
3673         GPRTemporary resultTag(this);
3674         GPRTemporary resultPayload(this);
3675         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3676         GPRReg scratchGPR = resultTag.gpr();
3677         FPRTemporary fprScratch(this);
3678         FPRReg scratchFPR = fprScratch.fpr();
3679 #endif
3680
3681         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3682         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3683
3684         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3685             leftFPR, rightFPR, scratchGPR, scratchFPR);
3686         gen.generateFastPath(m_jit);
3687
3688         ASSERT(gen.didEmitFastPath());
3689         gen.endJumpList().append(m_jit.jump());
3690
3691         gen.slowPathJumpList().link(&m_jit);
3692         silentSpillAllRegisters(resultRegs);
3693         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3694         silentFillAllRegisters(resultRegs);
3695         m_jit.exceptionCheck();
3696
3697         gen.endJumpList().link(&m_jit);
3698         jsValueResult(resultRegs, node);
3699         return;
3700     }
3701
3702     default:
3703         RELEASE_ASSERT_NOT_REACHED();
3704         return;
3705     }
3706 }
3707
3708 void SpeculativeJIT::compileArithNegate(Node* node)
3709 {
3710     switch (node->child1().useKind()) {
3711     case Int32Use: {
3712         SpeculateInt32Operand op1(this, node->child1());
3713         GPRTemporary result(this);
3714
3715         m_jit.move(op1.gpr(), result.gpr());
3716
3717         // Note: there is no arith mode in which the result is not used as a number
3718         // (i.e. overflow goes unchecked) yet someone still cares about negative zero.
3719         
3720         if (!shouldCheckOverflow(node->arithMode()))
3721             m_jit.neg32(result.gpr());
3722         else if (!shouldCheckNegativeZero(node->arithMode()))
3723             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3724         else {
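                 // (value & 0x7fffffff) == 0 exactly when value is 0 or INT32_MIN.
                 // Negating 0 would produce -0 and negating INT32_MIN would overflow,
                 // so this single test covers both speculation failures.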
3725             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3726             m_jit.neg32(result.gpr());
3727         }
3728
3729         int32Result(result.gpr(), node);
3730         return;
3731     }
3732
3733 #if USE(JSVALUE64)
3734     case Int52RepUse: {
3735         ASSERT(shouldCheckOverflow(node->arithMode()));
3736         
3737         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3738             SpeculateWhicheverInt52Operand op1(this, node->child1());
3739             GPRTemporary result(this);
3740             GPRReg op1GPR = op1.gpr();
3741             GPRReg resultGPR = result.gpr();
3742             m_jit.move(op1GPR, resultGPR);
3743             m_jit.neg64(resultGPR);
3744             if (shouldCheckNegativeZero(node->arithMode())) {
3745                 speculationCheck(
3746                     NegativeZero, JSValueRegs(), 0,
3747                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3748             }
3749             int52Result(resultGPR, node, op1.format());
3750             return;
3751         }
3752         
3753         SpeculateInt52Operand op1(this, node->child1());
3754         GPRTemporary result(this);
3755         GPRReg op1GPR = op1.gpr();
3756         GPRReg resultGPR = result.gpr();
3757         m_jit.move(op1GPR, resultGPR);
3758         speculationCheck(
3759             Int52Overflow, JSValueRegs(), 0,
3760             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3761         if (shouldCheckNegativeZero(node->arithMode())) {
3762             speculationCheck(
3763                 NegativeZero, JSValueRegs(), 0,
3764                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3765         }
3766         int52Result(resultGPR, node);
3767         return;
3768     }
3769 #endif // USE(JSVALUE64)
3770         
3771     case DoubleRepUse: {
3772         SpeculateDoubleOperand op1(this, node->child1());
3773         FPRTemporary result(this);
3774         
3775         m_jit.negateDouble(op1.fpr(), result.fpr());
3776         
3777         doubleResult(result.fpr(), node);
3778         return;
3779     }
3780         
3781     default:
3782         RELEASE_ASSERT_NOT_REACHED();
3783         return;
3784     }
3785 }
3786 void SpeculativeJIT::compileArithMul(Node* node)
3787 {
3788     switch (node->binaryUseKind()) {
3789     case Int32Use: {
3790         if (node->child2()->isInt32Constant()) {
3791             SpeculateInt32Operand op1(this, node->child1());
3792             GPRTemporary result(this);
3793
3794             int32_t imm = node->child2()->asInt32();
3795             GPRReg op1GPR = op1.gpr();
3796             GPRReg resultGPR = result.gpr();
3797
3798             if (!shouldCheckOverflow(node->arithMode()))
3799                 m_jit.mul32(Imm32(imm), op1GPR, resultGPR);
3800             else {
3801                 speculationCheck(Overflow, JSValueRegs(), 0,
3802                     m_jit.branchMul32(MacroAssembler::Overflow, op1GPR, Imm32(imm), resultGPR));
3803             }
3804
3805             // The only ways to create negative zero with a constant are:
3806             //   negative op1 * 0, and
3807             //   zero op1 * negative constant.
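                 // For example, (-1) * 0 evaluates to -0, as does 0 * (-1), and an int32
                 // result cannot represent -0.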
3808             if (shouldCheckNegativeZero(node->arithMode())) {
3809                 if (!imm)
3810                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, op1GPR));
3811                 else if (imm < 0) {
3812                     if (shouldCheckOverflow(node->arithMode()))
3813                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
3814                     else
3815                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, op1GPR));
3816                 }
3817             }
3818
3819             int32Result(resultGPR, node);
3820             return;
3821         }
3822         SpeculateInt32Operand op1(this, node->child1());
3823         SpeculateInt32Operand op2(this, node->child2());
3824         GPRTemporary result(this);
3825
3826         GPRReg reg1 = op1.gpr();
3827         GPRReg reg2 = op2.gpr();
3828
3829         // We can perform truncated multiplications if we get to this point, because if the
3830         // fixup phase could not prove that it would be safe, it would have turned us into
3831         // a double multiplication.
3832         if (!shouldCheckOverflow(node->arithMode())) {
3833             m_jit.move(reg1, result.gpr());
3834             m_jit.mul32(reg2, result.gpr());
3835         } else {
3836             speculationCheck(
3837                 Overflow, JSValueRegs(), 0,
3838                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3839         }
3840             
3841         // Check for negative zero, if the users of this node care about such things.
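         // A zero result is only suspicious if either operand was negative: in that
         // case the true result is -0 (e.g. -1 * 0), which int32 cannot represent.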
3842         if (shouldCheckNegativeZero(node->arithMode())) {
3843             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3844             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg1));
3845             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, reg2));
3846             resultNonZero.link(&m_jit);
3847         }
3848
3849         int32Result(result.gpr(), node);
3850         return;
3851     }
3852
3853 #if USE(JSVALUE64)
3854     case Int52RepUse: {
3855         ASSERT(shouldCheckOverflow(node->arithMode()));
3856         
3857         // This is super clever. We want to do an int52 multiplication and check the
3858         // int52 overflow bit. There is no direct hardware support for this, but we do
3859         // have the ability to do an int64 multiplication and check the int64 overflow
3860         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3861         // registers, with the high 12 bits being sign-extended. We can do:
3862         //
3863         //     (a * (b << 12))
3864         //
3865         // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3866         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3867