op_mul/ArithMul(Untyped,Untyped) should be an IC
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (WebKit-https.git)
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/Box.h>
60 #include <wtf/MathExtras.h>
61
62 namespace JSC { namespace DFG {
63
64 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
65     : m_compileOkay(true)
66     , m_jit(jit)
67     , m_currentNode(0)
68     , m_lastGeneratedNode(LastNodeType)
69     , m_indexInBlock(0)
70     , m_generationInfo(m_jit.graph().frameRegisterCount())
71     , m_state(m_jit.graph())
72     , m_interpreter(m_jit.graph(), m_state)
73     , m_stream(&jit.jitCode()->variableEventStream)
74     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
75 {
76 }
77
78 SpeculativeJIT::~SpeculativeJIT()
79 {
80 }
81
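// Descriptive note (added): fast-path allocation of an object together with its
// butterfly. The inline path bump-allocates the storage and the cell; if either
// allocation fails we fall through to the CallArrayAllocatorSlowPathGenerator
// registered below, which calls operationNewRawObject instead. For double
// arrays, unused vector slots are pre-filled with PNaN so they read as holes.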
82 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
83 {
84     IndexingType indexingType = structure->indexingType();
85     bool hasIndexingHeader = hasIndexedProperties(indexingType);
86
87     unsigned inlineCapacity = structure->inlineCapacity();
88     unsigned outOfLineCapacity = structure->outOfLineCapacity();
89     
90     GPRTemporary scratch(this);
91     GPRTemporary scratch2(this);
92     GPRReg scratchGPR = scratch.gpr();
93     GPRReg scratch2GPR = scratch2.gpr();
94
95     ASSERT(vectorLength >= numElements);
96     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
97     
98     JITCompiler::JumpList slowCases;
99
100     size_t size = 0;
101     if (hasIndexingHeader)
102         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
103     size += outOfLineCapacity * sizeof(JSValue);
104
105     if (size) {
106         slowCases.append(
107             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
108         if (hasIndexingHeader)
109             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
110         else
111             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
112     } else
113         m_jit.move(TrustedImmPtr(0), storageGPR);
114
115     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
116     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
117     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
118     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
119
120     if (hasIndexingHeader)
121         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
122
123     // I want a slow path that also loads out the storage pointer, and that's
124     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
125     // of work for a very small piece of functionality. :-/
126     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
127         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
128         structure, vectorLength));
129
130     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
131 #if USE(JSVALUE64)
132         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
133         for (unsigned i = numElements; i < vectorLength; ++i)
134             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
135 #else
136         EncodedValueDescriptor value;
137         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
138         for (unsigned i = numElements; i < vectorLength; ++i) {
139             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
140             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
141         }
142 #endif
143     }
144     
145     if (hasIndexingHeader)
146         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
147 }
148
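// Descriptive note (added): loads the argument count for the frame identified by
// inlineCallFrame (or the machine frame when it is null). For non-varargs inline
// frames the count is a compile-time constant; otherwise it is read from the
// frame's argument-count slot. When includeThis is false, |this| is excluded.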
149 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
150 {
151     if (inlineCallFrame && !inlineCallFrame->isVarargs())
152         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
153     else {
154         VirtualRegister argumentCountRegister;
155         if (!inlineCallFrame)
156             argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
157         else
158             argumentCountRegister = inlineCallFrame->argumentCountRegister;
159         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
160         if (!includeThis)
161             m_jit.sub32(TrustedImm32(1), lengthGPR);
162     }
163 }
164
165 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
166 {
167     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
168 }
169
170 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
171 {
172     if (origin.inlineCallFrame) {
173         if (origin.inlineCallFrame->isClosureCall) {
174             m_jit.loadPtr(
175                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
176                 calleeGPR);
177         } else {
178             m_jit.move(
179                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
180                 calleeGPR);
181         }
182     } else
183         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
184 }
185
186 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
187 {
188     m_jit.addPtr(
189         TrustedImm32(
190             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
191         GPRInfo::callFrameRegister, startGPR);
192 }
193
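// Descriptive note (added): OSR exit fuzzing. Bumps the global check counter and,
// once the threshold set by Options::fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter
// is reached, returns a jump that forces the exit to be taken. Returns an unset
// Jump when fuzzing is disabled.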
194 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
195 {
196     if (!doOSRExitFuzzing())
197         return MacroAssembler::Jump();
198     
199     MacroAssembler::Jump result;
200     
201     m_jit.pushToSave(GPRInfo::regT0);
202     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
203     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
204     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
205     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
206     unsigned at = Options::fireOSRExitFuzzAt();
207     if (at || atOrAfter) {
208         unsigned threshold;
209         MacroAssembler::RelationalCondition condition;
210         if (atOrAfter) {
211             threshold = atOrAfter;
212             condition = MacroAssembler::Below;
213         } else {
214             threshold = at;
215             condition = MacroAssembler::NotEqual;
216         }
217         MacroAssembler::Jump ok = m_jit.branch32(
218             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
219         m_jit.popToRestore(GPRInfo::regT0);
220         result = m_jit.jump();
221         ok.link(&m_jit);
222     }
223     m_jit.popToRestore(GPRInfo::regT0);
224     
225     return result;
226 }
227
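// Descriptive note (added): speculationCheck records an OSR exit for the given
// jump(s). The jumps are wired to exit info in the JITCompiler and an OSRExit
// entry is appended that captures the value-profile hook and the current
// variable-event stream position. If OSR exit fuzzing is active, the fuzz jump
// is folded into the same exit.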
228 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
229 {
230     if (!m_compileOkay)
231         return;
232     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
233     if (fuzzJump.isSet()) {
234         JITCompiler::JumpList jumpsToFail;
235         jumpsToFail.append(fuzzJump);
236         jumpsToFail.append(jumpToFail);
237         m_jit.appendExitInfo(jumpsToFail);
238     } else
239         m_jit.appendExitInfo(jumpToFail);
240     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
241 }
242
243 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
244 {
245     if (!m_compileOkay)
246         return;
247     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
248     if (fuzzJump.isSet()) {
249         JITCompiler::JumpList myJumpsToFail;
250         myJumpsToFail.append(jumpsToFail);
251         myJumpsToFail.append(fuzzJump);
252         m_jit.appendExitInfo(myJumpsToFail);
253     } else
254         m_jit.appendExitInfo(jumpsToFail);
255     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
256 }
257
258 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
259 {
260     if (!m_compileOkay)
261         return OSRExitJumpPlaceholder();
262     unsigned index = m_jit.jitCode()->osrExit.size();
263     m_jit.appendExitInfo();
264     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
265     return OSRExitJumpPlaceholder(index);
266 }
267
268 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
269 {
270     return speculationCheck(kind, jsValueSource, nodeUse.node());
271 }
272
273 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
274 {
275     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
276 }
277
278 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
279 {
280     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
281 }
282
283 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
284 {
285     if (!m_compileOkay)
286         return;
287     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
288     m_jit.appendExitInfo(jumpToFail);
289     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
290 }
291
292 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
293 {
294     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
295 }
296
297 void SpeculativeJIT::emitInvalidationPoint(Node* node)
298 {
299     if (!m_compileOkay)
300         return;
301     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
302     m_jit.jitCode()->appendOSRExit(OSRExit(
303         UncountableInvalidation, JSValueSource(),
304         m_jit.graph().methodOfGettingAValueProfileFor(node),
305         this, m_stream->size()));
306     info.m_replacementSource = m_jit.watchpointLabel();
307     ASSERT(info.m_replacementSource.isSet());
308     noResult(node);
309 }
310
311 void SpeculativeJIT::unreachable(Node* node)
312 {
313     m_compileOkay = false;
314     m_jit.abortWithReason(DFGUnreachableNode, node->op());
315 }
316
317 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
318 {
319     if (!m_compileOkay)
320         return;
321     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
322     m_compileOkay = false;
323     if (verboseCompilationEnabled())
324         dataLog("Bailing compilation.\n");
325 }
326
327 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
328 {
329     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
330 }
331
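// Descriptive note (added): typeCheck narrows the abstract value of the edge to
// the types that pass this check and records an OSR exit for the failure jump.
// Callers should only use it when needsTypeCheck() says the check is not
// redundant.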
332 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
333 {
334     ASSERT(needsTypeCheck(edge, typesPassedThrough));
335     m_interpreter.filter(edge, typesPassedThrough);
336     speculationCheck(exitKind, source, edge.node(), jumpToFail);
337 }
338
339 RegisterSet SpeculativeJIT::usedRegisters()
340 {
341     RegisterSet result;
342     
343     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
344         GPRReg gpr = GPRInfo::toRegister(i);
345         if (m_gprs.isInUse(gpr))
346             result.set(gpr);
347     }
348     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
349         FPRReg fpr = FPRInfo::toRegister(i);
350         if (m_fprs.isInUse(fpr))
351             result.set(fpr);
352     }
353     
354     result.merge(RegisterSet::stubUnavailableRegisters());
355     
356     return result;
357 }
358
359 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
360 {
361     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
362 }
363
364 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
365 {
366     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
367 }
368
369 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
370 {
371     for (auto& slowPathGenerator : m_slowPathGenerators) {
372         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
373         slowPathGenerator->generate(this);
374     }
375     for (auto& slowPathLambda : m_slowPathLambdas) {
376         Node* currentNode = slowPathLambda.currentNode;
377         m_currentNode = currentNode;
378         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
379         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
380         slowPathLambda.generator();
381         m_outOfLineStreamIndex = Nullopt;
382     }
383 }
384
385 void SpeculativeJIT::clearGenerationInfo()
386 {
387     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
388         m_generationInfo[i] = GenerationInfo();
389     m_gprs = RegisterBank<GPRInfo>();
390     m_fprs = RegisterBank<FPRInfo>();
391 }
392
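// Descriptive note (added): "silent" spill/fill plans describe how to save a live
// register before a call and restore it afterwards without disturbing the
// recorded generation info: constants are rematerialized, already-spilled values
// are reloaded, and only values that actually need spilling get a store in the
// spill action.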
393 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
394 {
395     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
396     Node* node = info.node();
397     DataFormat registerFormat = info.registerFormat();
398     ASSERT(registerFormat != DataFormatNone);
399     ASSERT(registerFormat != DataFormatDouble);
400         
401     SilentSpillAction spillAction;
402     SilentFillAction fillAction;
403         
404     if (!info.needsSpill())
405         spillAction = DoNothingForSpill;
406     else {
407 #if USE(JSVALUE64)
408         ASSERT(info.gpr() == source);
409         if (registerFormat == DataFormatInt32)
410             spillAction = Store32Payload;
411         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
412             spillAction = StorePtr;
413         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
414             spillAction = Store64;
415         else {
416             ASSERT(registerFormat & DataFormatJS);
417             spillAction = Store64;
418         }
419 #elif USE(JSVALUE32_64)
420         if (registerFormat & DataFormatJS) {
421             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
422             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
423         } else {
424             ASSERT(info.gpr() == source);
425             spillAction = Store32Payload;
426         }
427 #endif
428     }
429         
430     if (registerFormat == DataFormatInt32) {
431         ASSERT(info.gpr() == source);
432         ASSERT(isJSInt32(info.registerFormat()));
433         if (node->hasConstant()) {
434             ASSERT(node->isInt32Constant());
435             fillAction = SetInt32Constant;
436         } else
437             fillAction = Load32Payload;
438     } else if (registerFormat == DataFormatBoolean) {
439 #if USE(JSVALUE64)
440         RELEASE_ASSERT_NOT_REACHED();
441 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
442         fillAction = DoNothingForFill;
443 #endif
444 #elif USE(JSVALUE32_64)
445         ASSERT(info.gpr() == source);
446         if (node->hasConstant()) {
447             ASSERT(node->isBooleanConstant());
448             fillAction = SetBooleanConstant;
449         } else
450             fillAction = Load32Payload;
451 #endif
452     } else if (registerFormat == DataFormatCell) {
453         ASSERT(info.gpr() == source);
454         if (node->hasConstant()) {
455             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
456             node->asCell(); // To get the assertion.
457             fillAction = SetCellConstant;
458         } else {
459 #if USE(JSVALUE64)
460             fillAction = LoadPtr;
461 #else
462             fillAction = Load32Payload;
463 #endif
464         }
465     } else if (registerFormat == DataFormatStorage) {
466         ASSERT(info.gpr() == source);
467         fillAction = LoadPtr;
468     } else if (registerFormat == DataFormatInt52) {
469         if (node->hasConstant())
470             fillAction = SetInt52Constant;
471         else if (info.spillFormat() == DataFormatInt52)
472             fillAction = Load64;
473         else if (info.spillFormat() == DataFormatStrictInt52)
474             fillAction = Load64ShiftInt52Left;
475         else if (info.spillFormat() == DataFormatNone)
476             fillAction = Load64;
477         else {
478             RELEASE_ASSERT_NOT_REACHED();
479 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
480             fillAction = Load64; // Make GCC happy.
481 #endif
482         }
483     } else if (registerFormat == DataFormatStrictInt52) {
484         if (node->hasConstant())
485             fillAction = SetStrictInt52Constant;
486         else if (info.spillFormat() == DataFormatInt52)
487             fillAction = Load64ShiftInt52Right;
488         else if (info.spillFormat() == DataFormatStrictInt52)
489             fillAction = Load64;
490         else if (info.spillFormat() == DataFormatNone)
491             fillAction = Load64;
492         else {
493             RELEASE_ASSERT_NOT_REACHED();
494 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
495             fillAction = Load64; // Make GCC happy.
496 #endif
497         }
498     } else {
499         ASSERT(registerFormat & DataFormatJS);
500 #if USE(JSVALUE64)
501         ASSERT(info.gpr() == source);
502         if (node->hasConstant()) {
503             if (node->isCellConstant())
504                 fillAction = SetTrustedJSConstant;
505             else
506                 fillAction = SetJSConstant;
507         } else if (info.spillFormat() == DataFormatInt32) {
508             ASSERT(registerFormat == DataFormatJSInt32);
509             fillAction = Load32PayloadBoxInt;
510         } else
511             fillAction = Load64;
512 #else
513         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
514         if (node->hasConstant())
515             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
516         else if (info.payloadGPR() == source)
517             fillAction = Load32Payload;
518         else { // Fill the Tag
519             switch (info.spillFormat()) {
520             case DataFormatInt32:
521                 ASSERT(registerFormat == DataFormatJSInt32);
522                 fillAction = SetInt32Tag;
523                 break;
524             case DataFormatCell:
525                 ASSERT(registerFormat == DataFormatJSCell);
526                 fillAction = SetCellTag;
527                 break;
528             case DataFormatBoolean:
529                 ASSERT(registerFormat == DataFormatJSBoolean);
530                 fillAction = SetBooleanTag;
531                 break;
532             default:
533                 fillAction = Load32Tag;
534                 break;
535             }
536         }
537 #endif
538     }
539         
540     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
541 }
542     
543 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
544 {
545     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
546     Node* node = info.node();
547     ASSERT(info.registerFormat() == DataFormatDouble);
548
549     SilentSpillAction spillAction;
550     SilentFillAction fillAction;
551         
552     if (!info.needsSpill())
553         spillAction = DoNothingForSpill;
554     else {
555         ASSERT(!node->hasConstant());
556         ASSERT(info.spillFormat() == DataFormatNone);
557         ASSERT(info.fpr() == source);
558         spillAction = StoreDouble;
559     }
560         
561 #if USE(JSVALUE64)
562     if (node->hasConstant()) {
563         node->asNumber(); // To get the assertion.
564         fillAction = SetDoubleConstant;
565     } else {
566         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
567         fillAction = LoadDouble;
568     }
569 #elif USE(JSVALUE32_64)
570     ASSERT(info.registerFormat() == DataFormatDouble);
571     if (node->hasConstant()) {
572         node->asNumber(); // To get the assertion.
573         fillAction = SetDoubleConstant;
574     } else
575         fillAction = LoadDouble;
576 #endif
577
578     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
579 }
580     
581 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
582 {
583     switch (plan.spillAction()) {
584     case DoNothingForSpill:
585         break;
586     case Store32Tag:
587         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
588         break;
589     case Store32Payload:
590         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
591         break;
592     case StorePtr:
593         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
594         break;
595 #if USE(JSVALUE64)
596     case Store64:
597         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
598         break;
599 #endif
600     case StoreDouble:
601         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
602         break;
603     default:
604         RELEASE_ASSERT_NOT_REACHED();
605     }
606 }
607     
608 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
609 {
610 #if USE(JSVALUE32_64)
611     UNUSED_PARAM(canTrample);
612 #endif
613     switch (plan.fillAction()) {
614     case DoNothingForFill:
615         break;
616     case SetInt32Constant:
617         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
618         break;
619 #if USE(JSVALUE64)
620     case SetInt52Constant:
621         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
622         break;
623     case SetStrictInt52Constant:
624         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
625         break;
626 #endif // USE(JSVALUE64)
627     case SetBooleanConstant:
628         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
629         break;
630     case SetCellConstant:
631         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
632         break;
633 #if USE(JSVALUE64)
634     case SetTrustedJSConstant:
635         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
636         break;
637     case SetJSConstant:
638         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
639         break;
640     case SetDoubleConstant:
641         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
642         m_jit.move64ToDouble(canTrample, plan.fpr());
643         break;
644     case Load32PayloadBoxInt:
645         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
646         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
647         break;
648     case Load32PayloadConvertToInt52:
649         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
650         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
651         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
652         break;
653     case Load32PayloadSignExtend:
654         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
655         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
656         break;
657 #else
658     case SetJSConstantTag:
659         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
660         break;
661     case SetJSConstantPayload:
662         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
663         break;
664     case SetInt32Tag:
665         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
666         break;
667     case SetCellTag:
668         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
669         break;
670     case SetBooleanTag:
671         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
672         break;
673     case SetDoubleConstant:
674         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
675         break;
676 #endif
677     case Load32Tag:
678         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
679         break;
680     case Load32Payload:
681         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
682         break;
683     case LoadPtr:
684         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
685         break;
686 #if USE(JSVALUE64)
687     case Load64:
688         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
689         break;
690     case Load64ShiftInt52Right:
691         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
692         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
693         break;
694     case Load64ShiftInt52Left:
695         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
696         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
697         break;
698 #endif
699     case LoadDouble:
700         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
701         break;
702     default:
703         RELEASE_ASSERT_NOT_REACHED();
704     }
705 }
706     
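// Descriptive note (added): given the indexing-type byte of the base object in
// tempGPR, returns the jump(s) taken when the object's indexing shape (and,
// where required, its IsArray bit) does not match what the ArrayMode expects.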
707 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
708 {
709     switch (arrayMode.arrayClass()) {
710     case Array::OriginalArray: {
711         CRASH();
712 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
713         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
714         return result;
715 #endif
716     }
717         
718     case Array::Array:
719         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
720         return m_jit.branch32(
721             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
722         
723     case Array::NonArray:
724     case Array::OriginalNonArray:
725         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
726         return m_jit.branch32(
727             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
728         
729     case Array::PossiblyArray:
730         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
731         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
732     }
733     
734     RELEASE_ASSERT_NOT_REACHED();
735     return JITCompiler::Jump();
736 }
737
738 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
739 {
740     JITCompiler::JumpList result;
741     
742     switch (arrayMode.type()) {
743     case Array::Int32:
744         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
745
746     case Array::Double:
747         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
748
749     case Array::Contiguous:
750         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
751
752     case Array::Undecided:
753         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
754
755     case Array::ArrayStorage:
756     case Array::SlowPutArrayStorage: {
757         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
758         
759         if (arrayMode.isJSArray()) {
760             if (arrayMode.isSlowPut()) {
761                 result.append(
762                     m_jit.branchTest32(
763                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
764                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
765                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
766                 result.append(
767                     m_jit.branch32(
768                         MacroAssembler::Above, tempGPR,
769                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
770                 break;
771             }
772             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
773             result.append(
774                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
775             break;
776         }
777         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
778         if (arrayMode.isSlowPut()) {
779             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
780             result.append(
781                 m_jit.branch32(
782                     MacroAssembler::Above, tempGPR,
783                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
784             break;
785         }
786         result.append(
787             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
788         break;
789     }
790     default:
791         CRASH();
792         break;
793     }
794     
795     return result;
796 }
797
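// Descriptive note (added): CheckArray speculates that the base cell already has
// the indexing shape or cell type demanded by the node's ArrayMode. No
// conversion is performed here; modes that require conversion go through
// arrayify() below.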
798 void SpeculativeJIT::checkArray(Node* node)
799 {
800     ASSERT(node->arrayMode().isSpecific());
801     ASSERT(!node->arrayMode().doesConversion());
802     
803     SpeculateCellOperand base(this, node->child1());
804     GPRReg baseReg = base.gpr();
805     
806     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
807         noResult(m_currentNode);
808         return;
809     }
810     
811     const ClassInfo* expectedClassInfo = 0;
812     
813     switch (node->arrayMode().type()) {
814     case Array::AnyTypedArray:
815     case Array::String:
816         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
817         break;
818     case Array::Int32:
819     case Array::Double:
820     case Array::Contiguous:
821     case Array::Undecided:
822     case Array::ArrayStorage:
823     case Array::SlowPutArrayStorage: {
824         GPRTemporary temp(this);
825         GPRReg tempGPR = temp.gpr();
826         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
827         speculationCheck(
828             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
829             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
830         
831         noResult(m_currentNode);
832         return;
833     }
834     case Array::DirectArguments:
835         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
836         noResult(m_currentNode);
837         return;
838     case Array::ScopedArguments:
839         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
840         noResult(m_currentNode);
841         return;
842     default:
843         speculateCellTypeWithoutTypeFiltering(
844             node->child1(), baseReg,
845             typeForTypedArrayType(node->arrayMode().typedArrayType()));
846         noResult(m_currentNode);
847         return;
848     }
849     
850     RELEASE_ASSERT(expectedClassInfo);
851     
852     GPRTemporary temp(this);
853     GPRTemporary temp2(this);
854     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
855     speculationCheck(
856         BadType, JSValueSource::unboxedCell(baseReg), node,
857         m_jit.branchPtr(
858             MacroAssembler::NotEqual,
859             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
860             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
861     
862     noResult(m_currentNode);
863 }
864
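// Descriptive note (added): Arrayify / ArrayifyToStructure. The fast path merely
// checks that the object already has the wanted structure or indexing shape; if
// not, the ArrayifySlowPathGenerator converts the object's storage at runtime.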
865 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
866 {
867     ASSERT(node->arrayMode().doesConversion());
868     
869     GPRTemporary temp(this);
870     GPRTemporary structure;
871     GPRReg tempGPR = temp.gpr();
872     GPRReg structureGPR = InvalidGPRReg;
873     
874     if (node->op() != ArrayifyToStructure) {
875         GPRTemporary realStructure(this);
876         structure.adopt(realStructure);
877         structureGPR = structure.gpr();
878     }
879         
880     // We can skip all that comes next if we already have array storage.
881     MacroAssembler::JumpList slowPath;
882     
883     if (node->op() == ArrayifyToStructure) {
884         slowPath.append(m_jit.branchWeakStructure(
885             JITCompiler::NotEqual,
886             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
887             node->structure()));
888     } else {
889         m_jit.load8(
890             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
891         
892         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
893     }
894     
895     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
896         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
897     
898     noResult(m_currentNode);
899 }
900
901 void SpeculativeJIT::arrayify(Node* node)
902 {
903     ASSERT(node->arrayMode().isSpecific());
904     
905     SpeculateCellOperand base(this, node->child1());
906     
907     if (!node->child2()) {
908         arrayify(node, base.gpr(), InvalidGPRReg);
909         return;
910     }
911     
912     SpeculateInt32Operand property(this, node->child2());
913     
914     arrayify(node, base.gpr(), property.gpr());
915 }
916
917 GPRReg SpeculativeJIT::fillStorage(Edge edge)
918 {
919     VirtualRegister virtualRegister = edge->virtualRegister();
920     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
921     
922     switch (info.registerFormat()) {
923     case DataFormatNone: {
924         if (info.spillFormat() == DataFormatStorage) {
925             GPRReg gpr = allocate();
926             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
927             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
928             info.fillStorage(*m_stream, gpr);
929             return gpr;
930         }
931         
932         // Must be a cell; fill it as a cell and then return the pointer.
933         return fillSpeculateCell(edge);
934     }
935         
936     case DataFormatStorage: {
937         GPRReg gpr = info.gpr();
938         m_gprs.lock(gpr);
939         return gpr;
940     }
941         
942     default:
943         return fillSpeculateCell(edge);
944     }
945 }
946
947 void SpeculativeJIT::useChildren(Node* node)
948 {
949     if (node->flags() & NodeHasVarArgs) {
950         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
951             if (!!m_jit.graph().m_varArgChildren[childIdx])
952                 use(m_jit.graph().m_varArgChildren[childIdx]);
953         }
954     } else {
955         Edge child1 = node->child1();
956         if (!child1) {
957             ASSERT(!node->child2() && !node->child3());
958             return;
959         }
960         use(child1);
961         
962         Edge child2 = node->child2();
963         if (!child2) {
964             ASSERT(!node->child3());
965             return;
966         }
967         use(child2);
968         
969         Edge child3 = node->child3();
970         if (!child3)
971             return;
972         use(child3);
973     }
974 }
975
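// Descriptive note (added): TryGetById is compiled as a get_by_id inline cache
// (cachedGetById with AccessType::GetPure). For UntypedUse the IC is entered
// only for cells; the not-cell branch is handed to the IC's slow path.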
976 void SpeculativeJIT::compileTryGetById(Node* node)
977 {
978     switch (node->child1().useKind()) {
979     case CellUse: {
980         SpeculateCellOperand base(this, node->child1());
981         JSValueRegsTemporary result(this, Reuse, base);
982
983         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
984         JSValueRegs resultRegs = result.regs();
985
986         base.use();
987
988         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
989
990         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
991         break;
992     }
993
994     case UntypedUse: {
995         JSValueOperand base(this, node->child1());
996         JSValueRegsTemporary result(this, Reuse, base);
997
998         JSValueRegs baseRegs = base.jsValueRegs();
999         JSValueRegs resultRegs = result.regs();
1000
1001         base.use();
1002
1003         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1004
1005         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
1006
1007         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1008         break;
1009     }
1010
1011     default:
1012         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1013         break;
1014     } 
1015 }
1016
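// Descriptive note (added): "in" with a constant atomic-string property is
// compiled as an inline cache: a patchable jump plus a StructureStubInfo, with
// operationInOptimize as the slow path. Any other key falls back to a plain call
// to operationGenericIn.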
1017 void SpeculativeJIT::compileIn(Node* node)
1018 {
1019     SpeculateCellOperand base(this, node->child2());
1020     GPRReg baseGPR = base.gpr();
1021     
1022     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1023         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1024             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1025             
1026             GPRTemporary result(this);
1027             GPRReg resultGPR = result.gpr();
1028
1029             use(node->child1());
1030             
1031             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1032             MacroAssembler::Label done = m_jit.label();
1033             
1034             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1035             // we can cast it to const AtomicStringImpl* safely.
1036             auto slowPath = slowPathCall(
1037                 jump.m_jump, this, operationInOptimize,
1038                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1039                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1040             
1041             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1042             stubInfo->codeOrigin = node->origin.semantic;
1043             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1044             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1045 #if USE(JSVALUE32_64)
1046             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1047             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1048 #endif
1049             stubInfo->patch.usedRegisters = usedRegisters();
1050
1051             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1052             addSlowPathGenerator(WTFMove(slowPath));
1053
1054             base.use();
1055
1056             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1057             return;
1058         }
1059     }
1060
1061     JSValueOperand key(this, node->child1());
1062     JSValueRegs regs = key.jsValueRegs();
1063         
1064     GPRFlushedCallResult result(this);
1065     GPRReg resultGPR = result.gpr();
1066         
1067     base.use();
1068     key.use();
1069         
1070     flushRegisters();
1071     callOperation(
1072         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1073         baseGPR, regs);
1074     m_jit.exceptionCheck();
1075     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1076 }
1077
1078 void SpeculativeJIT::compileDeleteById(Node* node)
1079 {
1080     JSValueOperand value(this, node->child1());
1081     GPRFlushedCallResult result(this);
1082
1083     JSValueRegs valueRegs = value.jsValueRegs();
1084     GPRReg resultGPR = result.gpr();
1085
1086     value.use();
1087
1088     flushRegisters();
1089     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1090     m_jit.exceptionCheck();
1091
1092     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1093 }
1094
1095 void SpeculativeJIT::compileDeleteByVal(Node* node)
1096 {
1097     JSValueOperand base(this, node->child1());
1098     JSValueOperand key(this, node->child2());
1099     GPRFlushedCallResult result(this);
1100
1101     JSValueRegs baseRegs = base.jsValueRegs();
1102     JSValueRegs keyRegs = key.jsValueRegs();
1103     GPRReg resultGPR = result.gpr();
1104
1105     base.use();
1106     key.use();
1107
1108     flushRegisters();
1109     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1110     m_jit.exceptionCheck();
1111
1112     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1113 }
1114
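// Descriptive note (added): compare nodes followed immediately by a Branch on
// their result are fused into a single compare-and-branch. detectPeepHoleBranch()
// finds the branch, the fused code is emitted, and m_indexInBlock/m_currentNode
// are advanced so the branch is not compiled a second time. These helpers return
// true when that fusion happened.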
1115 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1116 {
1117     unsigned branchIndexInBlock = detectPeepHoleBranch();
1118     if (branchIndexInBlock != UINT_MAX) {
1119         Node* branchNode = m_block->at(branchIndexInBlock);
1120
1121         ASSERT(node->adjustedRefCount() == 1);
1122         
1123         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1124     
1125         m_indexInBlock = branchIndexInBlock;
1126         m_currentNode = branchNode;
1127         
1128         return true;
1129     }
1130     
1131     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1132     
1133     return false;
1134 }
1135
1136 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1137 {
1138     unsigned branchIndexInBlock = detectPeepHoleBranch();
1139     if (branchIndexInBlock != UINT_MAX) {
1140         Node* branchNode = m_block->at(branchIndexInBlock);
1141
1142         ASSERT(node->adjustedRefCount() == 1);
1143         
1144         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1145     
1146         m_indexInBlock = branchIndexInBlock;
1147         m_currentNode = branchNode;
1148         
1149         return true;
1150     }
1151     
1152     nonSpeculativeNonPeepholeStrictEq(node, invert);
1153     
1154     return false;
1155 }
1156
1157 static const char* dataFormatString(DataFormat format)
1158 {
1159     // These values correspond to the DataFormat enum.
1160     const char* strings[] = {
1161         "[  ]",
1162         "[ i]",
1163         "[ d]",
1164         "[ c]",
1165         "Err!",
1166         "Err!",
1167         "Err!",
1168         "Err!",
1169         "[J ]",
1170         "[Ji]",
1171         "[Jd]",
1172         "[Jc]",
1173         "Err!",
1174         "Err!",
1175         "Err!",
1176         "Err!",
1177     };
1178     return strings[format];
1179 }
1180
1181 void SpeculativeJIT::dump(const char* label)
1182 {
1183     if (label)
1184         dataLogF("<%s>\n", label);
1185
1186     dataLogF("  gprs:\n");
1187     m_gprs.dump();
1188     dataLogF("  fprs:\n");
1189     m_fprs.dump();
1190     dataLogF("  VirtualRegisters:\n");
1191     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1192         GenerationInfo& info = m_generationInfo[i];
1193         if (info.alive())
1194             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1195         else
1196             dataLogF("    % 3d:[__][__]", i);
1197         if (info.registerFormat() == DataFormatDouble)
1198             dataLogF(":fpr%d\n", info.fpr());
1199         else if (info.registerFormat() != DataFormatNone
1200 #if USE(JSVALUE32_64)
1201             && !(info.registerFormat() & DataFormatJS)
1202 #endif
1203             ) {
1204             ASSERT(info.gpr() != InvalidGPRReg);
1205             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1206         } else
1207             dataLogF("\n");
1208     }
1209     if (label)
1210         dataLogF("</%s>\n", label);
1211 }
1212
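// Descriptive note (added): GPRTemporary/FPRTemporary claim a register from the
// speculative JIT's register allocator for use while compiling the current node.
// The Reuse constructors instead take over an operand's register when canReuse()
// says that operand's value may be clobbered, avoiding an extra allocation.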
1213 GPRTemporary::GPRTemporary()
1214     : m_jit(0)
1215     , m_gpr(InvalidGPRReg)
1216 {
1217 }
1218
1219 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1220     : m_jit(jit)
1221     , m_gpr(InvalidGPRReg)
1222 {
1223     m_gpr = m_jit->allocate();
1224 }
1225
1226 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1227     : m_jit(jit)
1228     , m_gpr(InvalidGPRReg)
1229 {
1230     m_gpr = m_jit->allocate(specific);
1231 }
1232
1233 #if USE(JSVALUE32_64)
1234 GPRTemporary::GPRTemporary(
1235     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1236     : m_jit(jit)
1237     , m_gpr(InvalidGPRReg)
1238 {
1239     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1240         m_gpr = m_jit->reuse(op1.gpr(which));
1241     else
1242         m_gpr = m_jit->allocate();
1243 }
1244 #endif // USE(JSVALUE32_64)
1245
1246 JSValueRegsTemporary::JSValueRegsTemporary() { }
1247
1248 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1249 #if USE(JSVALUE64)
1250     : m_gpr(jit)
1251 #else
1252     : m_payloadGPR(jit)
1253     , m_tagGPR(jit)
1254 #endif
1255 {
1256 }
1257
1258 #if USE(JSVALUE64)
1259 template<typename T>
1260 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1261     : m_gpr(jit, Reuse, operand)
1262 {
1263 }
1264 #else
1265 template<typename T>
1266 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1267 {
1268     if (resultWord == PayloadWord) {
1269         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1270         m_tagGPR = GPRTemporary(jit);
1271     } else {
1272         m_payloadGPR = GPRTemporary(jit);
1273         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1274     }
1275 }
1276 #endif
1277
1278 #if USE(JSVALUE64)
1279 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1280 {
1281     m_gpr = GPRTemporary(jit, Reuse, operand);
1282 }
1283 #else
1284 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1285 {
1286     if (jit->canReuse(operand.node())) {
1287         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1288         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1289     } else {
1290         m_payloadGPR = GPRTemporary(jit);
1291         m_tagGPR = GPRTemporary(jit);
1292     }
1293 }
1294 #endif
1295
1296 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1297
1298 JSValueRegs JSValueRegsTemporary::regs()
1299 {
1300 #if USE(JSVALUE64)
1301     return JSValueRegs(m_gpr.gpr());
1302 #else
1303     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1304 #endif
1305 }
1306
1307 void GPRTemporary::adopt(GPRTemporary& other)
1308 {
1309     ASSERT(!m_jit);
1310     ASSERT(m_gpr == InvalidGPRReg);
1311     ASSERT(other.m_jit);
1312     ASSERT(other.m_gpr != InvalidGPRReg);
1313     m_jit = other.m_jit;
1314     m_gpr = other.m_gpr;
1315     other.m_jit = 0;
1316     other.m_gpr = InvalidGPRReg;
1317 }
1318
1319 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1320 {
1321     ASSERT(other.m_jit);
1322     ASSERT(other.m_fpr != InvalidFPRReg);
1323     m_jit = other.m_jit;
1324     m_fpr = other.m_fpr;
1325
1326     other.m_jit = nullptr;
1327 }
1328
1329 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1330     : m_jit(jit)
1331     , m_fpr(InvalidFPRReg)
1332 {
1333     m_fpr = m_jit->fprAllocate();
1334 }
1335
1336 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1337     : m_jit(jit)
1338     , m_fpr(InvalidFPRReg)
1339 {
1340     if (m_jit->canReuse(op1.node()))
1341         m_fpr = m_jit->reuse(op1.fpr());
1342     else
1343         m_fpr = m_jit->fprAllocate();
1344 }
1345
1346 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1347     : m_jit(jit)
1348     , m_fpr(InvalidFPRReg)
1349 {
1350     if (m_jit->canReuse(op1.node()))
1351         m_fpr = m_jit->reuse(op1.fpr());
1352     else if (m_jit->canReuse(op2.node()))
1353         m_fpr = m_jit->reuse(op2.fpr());
1354     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1355         m_fpr = m_jit->reuse(op1.fpr());
1356     else
1357         m_fpr = m_jit->fprAllocate();
1358 }
1359
1360 #if USE(JSVALUE32_64)
1361 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1362     : m_jit(jit)
1363     , m_fpr(InvalidFPRReg)
1364 {
1365     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1366         m_fpr = m_jit->reuse(op1.fpr());
1367     else
1368         m_fpr = m_jit->fprAllocate();
1369 }
1370 #endif
1371
1372 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1373 {
1374     BasicBlock* taken = branchNode->branchData()->taken.block;
1375     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1376
1377     if (taken == nextBlock()) {
1378         condition = MacroAssembler::invert(condition);
1379         std::swap(taken, notTaken);
1380     }
1381
1382     SpeculateDoubleOperand op1(this, node->child1());
1383     SpeculateDoubleOperand op2(this, node->child2());
1384     
1385     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1386     jump(notTaken);
1387 }
1388
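// Descriptive note (added): fused CompareEq(Object, Object) + Branch. Both
// operands are speculated to be objects and compared by pointer. If the
// masquerades-as-undefined watchpoint is no longer valid, we additionally
// speculate that neither operand has the MasqueradesAsUndefined type-info flag
// set.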
1389 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1390 {
1391     BasicBlock* taken = branchNode->branchData()->taken.block;
1392     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1393
1394     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1395     
1396     if (taken == nextBlock()) {
1397         condition = MacroAssembler::NotEqual;
1398         BasicBlock* tmp = taken;
1399         taken = notTaken;
1400         notTaken = tmp;
1401     }
1402
1403     SpeculateCellOperand op1(this, node->child1());
1404     SpeculateCellOperand op2(this, node->child2());
1405     
1406     GPRReg op1GPR = op1.gpr();
1407     GPRReg op2GPR = op2.gpr();
1408     
1409     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1410         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1411             speculationCheck(
1412                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1413         }
1414         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1415             speculationCheck(
1416                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1417         }
1418     } else {
1419         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1420             speculationCheck(
1421                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1422                 m_jit.branchIfNotObject(op1GPR));
1423         }
1424         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1425             m_jit.branchTest8(
1426                 MacroAssembler::NonZero, 
1427                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1428                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1429
1430         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1431             speculationCheck(
1432                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1433                 m_jit.branchIfNotObject(op2GPR));
1434         }
1435         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1436             m_jit.branchTest8(
1437                 MacroAssembler::NonZero, 
1438                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1439                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1440     }
1441
1442     branchPtr(condition, op1GPR, op2GPR, taken);
1443     jump(notTaken);
1444 }
1445
1446 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1447 {
1448     BasicBlock* taken = branchNode->branchData()->taken.block;
1449     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1450
1451     // The branch instruction will branch to the taken block.
1452     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1453     if (taken == nextBlock()) {
1454         condition = JITCompiler::invert(condition);
1455         BasicBlock* tmp = taken;
1456         taken = notTaken;
1457         notTaken = tmp;
1458     }
1459
1460     if (node->child1()->isInt32Constant()) {
1461         int32_t imm = node->child1()->asInt32();
1462         SpeculateBooleanOperand op2(this, node->child2());
1463         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1464     } else if (node->child2()->isInt32Constant()) {
1465         SpeculateBooleanOperand op1(this, node->child1());
1466         int32_t imm = node->child2()->asInt32();
1467         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1468     } else {
1469         SpeculateBooleanOperand op1(this, node->child1());
1470         SpeculateBooleanOperand op2(this, node->child2());
1471         branch32(condition, op1.gpr(), op2.gpr(), taken);
1472     }
1473
1474     jump(notTaken);
1475 }
1476
1477 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1478 {
1479     BasicBlock* taken = branchNode->branchData()->taken.block;
1480     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1481
1482     // The branch instruction will branch to the taken block.
1483     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1484     if (taken == nextBlock()) {
1485         condition = JITCompiler::invert(condition);
1486         BasicBlock* tmp = taken;
1487         taken = notTaken;
1488         notTaken = tmp;
1489     }
1490
1491     if (node->child1()->isInt32Constant()) {
1492         int32_t imm = node->child1()->asInt32();
1493         SpeculateInt32Operand op2(this, node->child2());
1494         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1495     } else if (node->child2()->isInt32Constant()) {
1496         SpeculateInt32Operand op1(this, node->child1());
1497         int32_t imm = node->child2()->asInt32();
1498         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1499     } else {
1500         SpeculateInt32Operand op1(this, node->child1());
1501         SpeculateInt32Operand op2(this, node->child2());
1502         branch32(condition, op1.gpr(), op2.gpr(), taken);
1503     }
1504
1505     jump(notTaken);
1506 }
1507
1508 // Returns true if the compare is fused with a subsequent branch.
1509 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1510 {
1511     // Fused compare & branch.
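         // For example, `if (a < b) { ... }` typically produces a compare node whose only use is the
         // Branch node that immediately follows it, so the compare and the branch can be emitted as a
         // single conditional jump instead of materializing a boolean.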
1512     unsigned branchIndexInBlock = detectPeepHoleBranch();
1513     if (branchIndexInBlock != UINT_MAX) {
1514         Node* branchNode = m_block->at(branchIndexInBlock);
1515
1516         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1517         // so there can be no intervening nodes that also reference the compare.
1518         ASSERT(node->adjustedRefCount() == 1);
1519
1520         if (node->isBinaryUseKind(Int32Use))
1521             compilePeepHoleInt32Branch(node, branchNode, condition);
1522 #if USE(JSVALUE64)
1523         else if (node->isBinaryUseKind(Int52RepUse))
1524             compilePeepHoleInt52Branch(node, branchNode, condition);
1525 #endif // USE(JSVALUE64)
1526         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1527             // Use non-peephole comparison, for now.
1528             return false;
1529         } else if (node->isBinaryUseKind(DoubleRepUse))
1530             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1531         else if (node->op() == CompareEq) {
1532             if (node->isBinaryUseKind(BooleanUse))
1533                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1534             else if (node->isBinaryUseKind(SymbolUse))
1535                 compilePeepHoleSymbolEquality(node, branchNode);
1536             else if (node->isBinaryUseKind(ObjectUse))
1537                 compilePeepHoleObjectEquality(node, branchNode);
1538             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1539                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1540             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1541                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1542             else if (!needsTypeCheck(node->child1(), SpecOther))
1543                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1544             else if (!needsTypeCheck(node->child2(), SpecOther))
1545                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1546             else {
1547                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1548                 return true;
1549             }
1550         } else {
1551             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1552             return true;
1553         }
1554
1555         use(node->child1());
1556         use(node->child2());
1557         m_indexInBlock = branchIndexInBlock;
1558         m_currentNode = branchNode;
1559         return true;
1560     }
1561     return false;
1562 }
1563
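     // Record, in the variable event stream, that this node's value has come into existence so that
     // OSR exit can recover it if we exit at a later point.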
1564 void SpeculativeJIT::noticeOSRBirth(Node* node)
1565 {
1566     if (!node->hasVirtualRegister())
1567         return;
1568     
1569     VirtualRegister virtualRegister = node->virtualRegister();
1570     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1571     
1572     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1573 }
1574
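     // A MovHint emits no code; it only records, for OSR exit, that the child's value is what the
     // given bytecode local holds from this point on.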
1575 void SpeculativeJIT::compileMovHint(Node* node)
1576 {
1577     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1578     
1579     Node* child = node->child1().node();
1580     noticeOSRBirth(child);
1581     
1582     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1583 }
1584
1585 void SpeculativeJIT::bail(AbortReason reason)
1586 {
1587     if (verboseCompilationEnabled())
1588         dataLog("Bailing compilation.\n");
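         // Deliberately reset m_compileOkay: the code emitted for this block ends in the abort below,
         // and resetting the flag lets compileCurrentBlock()'s ASSERT(m_compileOkay) pass for the
         // remaining blocks.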
1589     m_compileOkay = true;
1590     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1591     clearGenerationInfo();
1592 }
1593
1594 void SpeculativeJIT::compileCurrentBlock()
1595 {
1596     ASSERT(m_compileOkay);
1597     
1598     if (!m_block)
1599         return;
1600     
1601     ASSERT(m_block->isReachable);
1602     
1603     m_jit.blockHeads()[m_block->index] = m_jit.label();
1604
1605     if (!m_block->intersectionOfCFAHasVisited) {
1606         // Don't generate code for basic blocks that are unreachable according to CFA.
1607         // But to be sure that nobody has generated a jump to this block, drop in a
1608         // breakpoint here.
1609         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1610         return;
1611     }
1612
1613     m_stream->appendAndLog(VariableEvent::reset());
1614     
1615     m_jit.jitAssertHasValidCallFrame();
1616     m_jit.jitAssertTagsInPlace();
1617     m_jit.jitAssertArgumentCountSane();
1618
1619     m_state.reset();
1620     m_state.beginBasicBlock(m_block);
1621     
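         // Record, for OSR exit, where each variable that is live at the head of this block lives
         // (its machine local) and in what format.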
1622     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1623         int operand = m_block->variablesAtHead.operandForIndex(i);
1624         Node* node = m_block->variablesAtHead[i];
1625         if (!node)
1626             continue; // No need to record dead SetLocals.
1627         
1628         VariableAccessData* variable = node->variableAccessData();
1629         DataFormat format;
1630         if (!node->refCount())
1631             continue; // No need to record dead SetLocals.
1632         format = dataFormatFor(variable->flushFormat());
1633         m_stream->appendAndLog(
1634             VariableEvent::setLocal(
1635                 VirtualRegister(operand),
1636                 variable->machineLocal(),
1637                 format));
1638     }
1639
1640     m_origin = NodeOrigin();
1641     
1642     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1643         m_currentNode = m_block->at(m_indexInBlock);
1644         
1645         // We may have hit a contradiction that the CFA was aware of but that the JIT
1646         // didn't cause directly.
1647         if (!m_state.isValid()) {
1648             bail(DFGBailedAtTopOfBlock);
1649             return;
1650         }
1651
1652         m_interpreter.startExecuting();
1653         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1654         m_jit.setForNode(m_currentNode);
1655         m_origin = m_currentNode->origin;
1656         if (validationEnabled())
1657             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1658         m_lastGeneratedNode = m_currentNode->op();
1659         
1660         ASSERT(m_currentNode->shouldGenerate());
1661         
1662         if (verboseCompilationEnabled()) {
1663             dataLogF(
1664                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1665                 (int)m_currentNode->index(),
1666                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1667             dataLog("\n");
1668         }
1669
1670         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1671             m_jit.jitReleaseAssertNoException();
1672
1673         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1674
1675         compile(m_currentNode);
1676         
1677         if (belongsInMinifiedGraph(m_currentNode->op()))
1678             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1679         
1680 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1681         m_jit.clearRegisterAllocationOffsets();
1682 #endif
1683         
1684         if (!m_compileOkay) {
1685             bail(DFGBailedAtEndOfNode);
1686             return;
1687         }
1688         
1689         // Make sure that the abstract state is rematerialized for the next node.
1690         m_interpreter.executeEffects(m_indexInBlock);
1691     }
1692     
1693     // Perform the most basic verification that children have been used correctly.
1694     if (!ASSERT_DISABLED) {
1695         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1696             GenerationInfo& info = m_generationInfo[index];
1697             RELEASE_ASSERT(!info.alive());
1698         }
1699     }
1700 }
1701
1702 // If we are making type predictions about our arguments then
1703 // we need to check that they are correct on function entry.
1704 void SpeculativeJIT::checkArgumentTypes()
1705 {
1706     ASSERT(!m_currentNode);
1707     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1708
1709     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1710         Node* node = m_jit.graph().m_arguments[i];
1711         if (!node) {
1712             // The argument is dead. We don't do any checks for such arguments.
1713             continue;
1714         }
1715         
1716         ASSERT(node->op() == SetArgument);
1717         ASSERT(node->shouldGenerate());
1718
1719         VariableAccessData* variableAccessData = node->variableAccessData();
1720         FlushFormat format = variableAccessData->flushFormat();
1721         
1722         if (format == FlushedJSValue)
1723             continue;
1724         
1725         VirtualRegister virtualRegister = variableAccessData->local();
1726
1727         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1728         
1729 #if USE(JSVALUE64)
1730         switch (format) {
1731         case FlushedInt32: {
1732             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1733             break;
1734         }
1735         case FlushedBoolean: {
1736             GPRTemporary temp(this);
1737             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
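                 // ValueFalse and ValueTrue differ only in the low bit, so xoring with ValueFalse maps
                 // a genuine boolean to 0 or 1; any other bit being set indicates a non-boolean.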
1738             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1739             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1740             break;
1741         }
1742         case FlushedCell: {
1743             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1744             break;
1745         }
1746         default:
1747             RELEASE_ASSERT_NOT_REACHED();
1748             break;
1749         }
1750 #else
1751         switch (format) {
1752         case FlushedInt32: {
1753             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1754             break;
1755         }
1756         case FlushedBoolean: {
1757             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1758             break;
1759         }
1760         case FlushedCell: {
1761             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1762             break;
1763         }
1764         default:
1765             RELEASE_ASSERT_NOT_REACHED();
1766             break;
1767         }
1768 #endif
1769     }
1770
1771     m_origin = NodeOrigin();
1772 }
1773
1774 bool SpeculativeJIT::compile()
1775 {
1776     checkArgumentTypes();
1777     
1778     ASSERT(!m_currentNode);
1779     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1780         m_jit.setForBlockIndex(blockIndex);
1781         m_block = m_jit.graph().block(blockIndex);
1782         compileCurrentBlock();
1783     }
1784     linkBranches();
1785     return true;
1786 }
1787
1788 void SpeculativeJIT::createOSREntries()
1789 {
1790     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1791         BasicBlock* block = m_jit.graph().block(blockIndex);
1792         if (!block)
1793             continue;
1794         if (!block->isOSRTarget)
1795             continue;
1796         
1797         // Currently we don't have OSR entry trampolines. We could add them
1798         // here if need be.
1799         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1800     }
1801 }
1802
1803 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1804 {
1805     unsigned osrEntryIndex = 0;
1806     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1807         BasicBlock* block = m_jit.graph().block(blockIndex);
1808         if (!block)
1809             continue;
1810         if (!block->isOSRTarget)
1811             continue;
1812         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1813     }
1814     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1815     
1816     if (verboseCompilationEnabled()) {
1817         DumpContext dumpContext;
1818         dataLog("OSR Entries:\n");
1819         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1820             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1821         if (!dumpContext.isEmpty())
1822             dumpContext.dump(WTF::dataFile());
1823     }
1824 }
1825
1826 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1827 {
1828     Edge child3 = m_jit.graph().varArgChild(node, 2);
1829     Edge child4 = m_jit.graph().varArgChild(node, 3);
1830
1831     ArrayMode arrayMode = node->arrayMode();
1832     
1833     GPRReg baseReg = base.gpr();
1834     GPRReg propertyReg = property.gpr();
1835     
1836     SpeculateDoubleOperand value(this, child3);
1837
1838     FPRReg valueReg = value.fpr();
1839     
1840     DFG_TYPE_CHECK(
1841         JSValueRegs(), child3, SpecFullRealNumber,
1842         m_jit.branchDouble(
1843             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1844     
1845     if (!m_compileOkay)
1846         return;
1847     
1848     StorageOperand storage(this, child4);
1849     GPRReg storageReg = storage.gpr();
1850
1851     if (node->op() == PutByValAlias) {
1852         // Store the value to the array.
1853         GPRReg propertyReg = property.gpr();
1854         FPRReg valueReg = value.fpr();
1855         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1856         
1857         noResult(m_currentNode);
1858         return;
1859     }
1860     
1861     GPRTemporary temporary;
1862     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1863
1864     MacroAssembler::Jump slowCase;
1865     
1866     if (arrayMode.isInBounds()) {
1867         speculationCheck(
1868             OutOfBounds, JSValueRegs(), 0,
1869             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1870     } else {
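             // The index may be past the public length. If it is also past the vector length we take
             // the slow path (or OSR exit if this array mode forbids going out of bounds); otherwise
             // grow the public length to index + 1 and fall through to the store.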
1871         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1872         
1873         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1874         
1875         if (!arrayMode.isOutOfBounds())
1876             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1877         
1878         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1879         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1880         
1881         inBounds.link(&m_jit);
1882     }
1883     
1884     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1885
1886     base.use();
1887     property.use();
1888     value.use();
1889     storage.use();
1890     
1891     if (arrayMode.isOutOfBounds()) {
1892         addSlowPathGenerator(
1893             slowPathCall(
1894                 slowCase, this,
1895                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1896                 NoResult, baseReg, propertyReg, valueReg));
1897     }
1898
1899     noResult(m_currentNode, UseChildrenCalledExplicitly);
1900 }
1901
1902 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1903 {
1904     SpeculateCellOperand string(this, node->child1());
1905     SpeculateStrictInt32Operand index(this, node->child2());
1906     StorageOperand storage(this, node->child3());
1907
1908     GPRReg stringReg = string.gpr();
1909     GPRReg indexReg = index.gpr();
1910     GPRReg storageReg = storage.gpr();
1911     
1912     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1913
1914     // unsigned comparison so we can filter out negative indices and indices that are too large
1915     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1916
1917     GPRTemporary scratch(this);
1918     GPRReg scratchReg = scratch.gpr();
1919
1920     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1921
1922     // Load the character into scratchReg
1923     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1924
1925     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1926     JITCompiler::Jump cont8Bit = m_jit.jump();
1927
1928     is16Bit.link(&m_jit);
1929
1930     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1931
1932     cont8Bit.link(&m_jit);
1933
1934     int32Result(scratchReg, m_currentNode);
1935 }
1936
1937 void SpeculativeJIT::compileGetByValOnString(Node* node)
1938 {
1939     SpeculateCellOperand base(this, node->child1());
1940     SpeculateStrictInt32Operand property(this, node->child2());
1941     StorageOperand storage(this, node->child3());
1942     GPRReg baseReg = base.gpr();
1943     GPRReg propertyReg = property.gpr();
1944     GPRReg storageReg = storage.gpr();
1945
1946     GPRTemporary scratch(this);
1947     GPRReg scratchReg = scratch.gpr();
1948 #if USE(JSVALUE32_64)
1949     GPRTemporary resultTag;
1950     GPRReg resultTagReg = InvalidGPRReg;
1951     if (node->arrayMode().isOutOfBounds()) {
1952         GPRTemporary realResultTag(this);
1953         resultTag.adopt(realResultTag);
1954         resultTagReg = resultTag.gpr();
1955     }
1956 #endif
1957
1958     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1959
1960     // unsigned comparison so we can filter out negative indices and indices that are too large
1961     JITCompiler::Jump outOfBounds = m_jit.branch32(
1962         MacroAssembler::AboveOrEqual, propertyReg,
1963         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1964     if (node->arrayMode().isInBounds())
1965         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1966
1967     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1968
1969     // Load the character into scratchReg
1970     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1971
1972     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1973     JITCompiler::Jump cont8Bit = m_jit.jump();
1974
1975     is16Bit.link(&m_jit);
1976
1977     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1978
1979     JITCompiler::Jump bigCharacter =
1980         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1981
1982     // 8 bit string values don't need the isASCII check.
1983     cont8Bit.link(&m_jit);
1984
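         // Index into the VM's table of single-character strings: scale the character code by the
         // pointer size (4 or 8 bytes) and load the JSString* for that character.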
1985     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1986     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1987     m_jit.loadPtr(scratchReg, scratchReg);
1988
1989     addSlowPathGenerator(
1990         slowPathCall(
1991             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1992
1993     if (node->arrayMode().isOutOfBounds()) {
1994 #if USE(JSVALUE32_64)
1995         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1996 #endif
1997
1998         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1999         bool prototypeChainIsSane = false;
2000         if (globalObject->stringPrototypeChainIsSane()) {
2001             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2002             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2003             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2004             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2005             // indexed properties either.
2006             // https://bugs.webkit.org/show_bug.cgi?id=144668
2007             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2008             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2009             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2010         }
2011         if (prototypeChainIsSane) {
2012             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2013             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2014             
2015 #if USE(JSVALUE64)
2016             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2017                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2018 #else
2019             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2020                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2021                 baseReg, propertyReg));
2022 #endif
2023         } else {
2024 #if USE(JSVALUE64)
2025             addSlowPathGenerator(
2026                 slowPathCall(
2027                     outOfBounds, this, operationGetByValStringInt,
2028                     scratchReg, baseReg, propertyReg));
2029 #else
2030             addSlowPathGenerator(
2031                 slowPathCall(
2032                     outOfBounds, this, operationGetByValStringInt,
2033                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2034 #endif
2035         }
2036         
2037 #if USE(JSVALUE64)
2038         jsValueResult(scratchReg, m_currentNode);
2039 #else
2040         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2041 #endif
2042     } else
2043         cellResult(scratchReg, m_currentNode);
2044 }
2045
2046 void SpeculativeJIT::compileFromCharCode(Node* node)
2047 {
2048     Edge& child = node->child1();
2049     if (child.useKind() == UntypedUse) {
2050         JSValueOperand opr(this, child);
2051         JSValueRegs oprRegs = opr.jsValueRegs();
2052 #if USE(JSVALUE64)
2053         GPRTemporary result(this);
2054         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2055 #else
2056         GPRTemporary resultTag(this);
2057         GPRTemporary resultPayload(this);
2058         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2059 #endif
2060         flushRegisters();
2061         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2062         m_jit.exceptionCheck();
2063         
2064         jsValueResult(resultRegs, node);
2065         return;
2066     }
2067
2068     SpeculateStrictInt32Operand property(this, child);
2069     GPRReg propertyReg = property.gpr();
2070     GPRTemporary smallStrings(this);
2071     GPRTemporary scratch(this);
2072     GPRReg scratchReg = scratch.gpr();
2073     GPRReg smallStringsReg = smallStrings.gpr();
2074
2075     JITCompiler::JumpList slowCases;
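         // Character codes above 0xff, and table entries that are still null (presumably not yet
         // materialized), take the slow path, which builds the string in the runtime.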
2076     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2077     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2078     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2079
2080     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2081     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2082     cellResult(scratchReg, m_currentNode);
2083 }
2084
2085 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2086 {
2087     VirtualRegister virtualRegister = node->virtualRegister();
2088     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2089
2090     switch (info.registerFormat()) {
2091     case DataFormatStorage:
2092         RELEASE_ASSERT_NOT_REACHED();
2093
2094     case DataFormatBoolean:
2095     case DataFormatCell:
2096         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2097         return GeneratedOperandTypeUnknown;
2098
2099     case DataFormatNone:
2100     case DataFormatJSCell:
2101     case DataFormatJS:
2102     case DataFormatJSBoolean:
2103     case DataFormatJSDouble:
2104         return GeneratedOperandJSValue;
2105
2106     case DataFormatJSInt32:
2107     case DataFormatInt32:
2108         return GeneratedOperandInteger;
2109
2110     default:
2111         RELEASE_ASSERT_NOT_REACHED();
2112         return GeneratedOperandTypeUnknown;
2113     }
2114 }
2115
2116 void SpeculativeJIT::compileValueToInt32(Node* node)
2117 {
2118     switch (node->child1().useKind()) {
2119 #if USE(JSVALUE64)
2120     case Int52RepUse: {
2121         SpeculateStrictInt52Operand op1(this, node->child1());
2122         GPRTemporary result(this, Reuse, op1);
2123         GPRReg op1GPR = op1.gpr();
2124         GPRReg resultGPR = result.gpr();
2125         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2126         int32Result(resultGPR, node, DataFormatInt32);
2127         return;
2128     }
2129 #endif // USE(JSVALUE64)
2130         
2131     case DoubleRepUse: {
2132         GPRTemporary result(this);
2133         SpeculateDoubleOperand op1(this, node->child1());
2134         FPRReg fpr = op1.fpr();
2135         GPRReg gpr = result.gpr();
2136         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2137         
2138         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2139         
2140         int32Result(gpr, node);
2141         return;
2142     }
2143     
2144     case NumberUse:
2145     case NotCellUse: {
2146         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2147         case GeneratedOperandInteger: {
2148             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2149             GPRTemporary result(this, Reuse, op1);
2150             m_jit.move(op1.gpr(), result.gpr());
2151             int32Result(result.gpr(), node, op1.format());
2152             return;
2153         }
2154         case GeneratedOperandJSValue: {
2155             GPRTemporary result(this);
2156 #if USE(JSVALUE64)
2157             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2158
2159             GPRReg gpr = op1.gpr();
2160             GPRReg resultGpr = result.gpr();
2161             FPRTemporary tempFpr(this);
2162             FPRReg fpr = tempFpr.fpr();
2163
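                 // On 64-bit, boxed int32s are exactly the JSValues at or above TagTypeNumber, so a
                 // single unsigned comparison against the tag register identifies them.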
2164             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2165             JITCompiler::JumpList converted;
2166
2167             if (node->child1().useKind() == NumberUse) {
2168                 DFG_TYPE_CHECK(
2169                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2170                     m_jit.branchTest64(
2171                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2172             } else {
2173                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2174                 
2175                 DFG_TYPE_CHECK(
2176                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2177                 
2178                 // It's not a cell: so true turns into 1 and all else turns into 0.
2179                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2180                 converted.append(m_jit.jump());
2181                 
2182                 isNumber.link(&m_jit);
2183             }
2184
2185             // If we get here, we have a double encoded as a JSValue; unbox it and convert it with operationToInt32.
2186             unboxDouble(gpr, resultGpr, fpr);
2187
2188             silentSpillAllRegisters(resultGpr);
2189             callOperation(operationToInt32, resultGpr, fpr);
2190             silentFillAllRegisters(resultGpr);
2191
2192             converted.append(m_jit.jump());
2193
2194             isInteger.link(&m_jit);
2195             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2196
2197             converted.link(&m_jit);
2198 #else
2199             Node* childNode = node->child1().node();
2200             VirtualRegister virtualRegister = childNode->virtualRegister();
2201             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2202
2203             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2204
2205             GPRReg payloadGPR = op1.payloadGPR();
2206             GPRReg resultGpr = result.gpr();
2207         
2208             JITCompiler::JumpList converted;
2209
2210             if (info.registerFormat() == DataFormatJSInt32)
2211                 m_jit.move(payloadGPR, resultGpr);
2212             else {
2213                 GPRReg tagGPR = op1.tagGPR();
2214                 FPRTemporary tempFpr(this);
2215                 FPRReg fpr = tempFpr.fpr();
2216                 FPRTemporary scratch(this);
2217
2218                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2219
2220                 if (node->child1().useKind() == NumberUse) {
2221                     DFG_TYPE_CHECK(
2222                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2223                         m_jit.branch32(
2224                             MacroAssembler::AboveOrEqual, tagGPR,
2225                             TrustedImm32(JSValue::LowestTag)));
2226                 } else {
2227                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2228                     
2229                     DFG_TYPE_CHECK(
2230                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2231                         m_jit.branchIfCell(op1.jsValueRegs()));
2232                     
2233                     // It's not a cell: so true turns into 1 and all else turns into 0.
2234                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2235                     m_jit.move(TrustedImm32(0), resultGpr);
2236                     converted.append(m_jit.jump());
2237                     
2238                     isBoolean.link(&m_jit);
2239                     m_jit.move(payloadGPR, resultGpr);
2240                     converted.append(m_jit.jump());
2241                     
2242                     isNumber.link(&m_jit);
2243                 }
2244
2245                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2246
2247                 silentSpillAllRegisters(resultGpr);
2248                 callOperation(operationToInt32, resultGpr, fpr);
2249                 silentFillAllRegisters(resultGpr);
2250
2251                 converted.append(m_jit.jump());
2252
2253                 isInteger.link(&m_jit);
2254                 m_jit.move(payloadGPR, resultGpr);
2255
2256                 converted.link(&m_jit);
2257             }
2258 #endif
2259             int32Result(resultGpr, node);
2260             return;
2261         }
2262         case GeneratedOperandTypeUnknown:
2263             RELEASE_ASSERT(!m_compileOkay);
2264             return;
2265         }
2266         RELEASE_ASSERT_NOT_REACHED();
2267         return;
2268     }
2269     
2270     default:
2271         ASSERT(!m_compileOkay);
2272         return;
2273     }
2274 }
2275
2276 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2277 {
2278     if (doesOverflow(node->arithMode())) {
2279         if (enableInt52()) {
2280             SpeculateInt32Operand op1(this, node->child1());
2281             GPRTemporary result(this, Reuse, op1);
2282             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2283             strictInt52Result(result.gpr(), node);
2284             return;
2285         }
2286         SpeculateInt32Operand op1(this, node->child1());
2287         FPRTemporary result(this);
2288             
2289         GPRReg inputGPR = op1.gpr();
2290         FPRReg outputFPR = result.fpr();
2291             
2292         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2293             
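             // If the register is negative when read as a signed int32, the unsigned value it holds is
             // larger by exactly 2^32, so compensate by adding 2^32 to the converted double.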
2294         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2295         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2296         positive.link(&m_jit);
2297             
2298         doubleResult(outputFPR, node);
2299         return;
2300     }
2301     
2302     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2303
2304     SpeculateInt32Operand op1(this, node->child1());
2305     GPRTemporary result(this);
2306
2307     m_jit.move(op1.gpr(), result.gpr());
2308
2309     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2310
2311     int32Result(result.gpr(), node, op1.format());
2312 }
2313
2314 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2315 {
2316     SpeculateDoubleOperand op1(this, node->child1());
2317     FPRTemporary scratch(this);
2318     GPRTemporary result(this);
2319     
2320     FPRReg valueFPR = op1.fpr();
2321     FPRReg scratchFPR = scratch.fpr();
2322     GPRReg resultGPR = result.gpr();
2323
2324     JITCompiler::JumpList failureCases;
2325     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2326     m_jit.branchConvertDoubleToInt32(
2327         valueFPR, resultGPR, failureCases, scratchFPR,
2328         shouldCheckNegativeZero(node->arithMode()));
2329     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2330
2331     int32Result(resultGPR, node);
2332 }
2333
2334 void SpeculativeJIT::compileDoubleRep(Node* node)
2335 {
2336     switch (node->child1().useKind()) {
2337     case RealNumberUse: {
2338         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2339         FPRTemporary result(this);
2340         
2341         JSValueRegs op1Regs = op1.jsValueRegs();
2342         FPRReg resultFPR = result.fpr();
2343         
2344 #if USE(JSVALUE64)
2345         GPRTemporary temp(this);
2346         GPRReg tempGPR = temp.gpr();
2347         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2348 #else
2349         FPRTemporary temp(this);
2350         FPRReg tempFPR = temp.fpr();
2351         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2352 #endif
2353         
2354         JITCompiler::Jump done = m_jit.branchDouble(
2355             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2356         
2357         DFG_TYPE_CHECK(
2358             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2359         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2360         
2361         done.link(&m_jit);
2362         
2363         doubleResult(resultFPR, node);
2364         return;
2365     }
2366     
2367     case NotCellUse:
2368     case NumberUse: {
2369         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2370
2371         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2372         if (isInt32Speculation(possibleTypes)) {
2373             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2374             FPRTemporary result(this);
2375             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2376             doubleResult(result.fpr(), node);
2377             return;
2378         }
2379
2380         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2381         FPRTemporary result(this);
2382
2383 #if USE(JSVALUE64)
2384         GPRTemporary temp(this);
2385
2386         GPRReg op1GPR = op1.gpr();
2387         GPRReg tempGPR = temp.gpr();
2388         FPRReg resultFPR = result.fpr();
2389         JITCompiler::JumpList done;
2390
2391         JITCompiler::Jump isInteger = m_jit.branch64(
2392             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2393
2394         if (node->child1().useKind() == NotCellUse) {
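                 // Non-cell, non-number values convert as follows: null and false become 0, true
                 // becomes 1, and undefined becomes NaN; cells fail the type check below.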
2395             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2396             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2397
2398             static const double zero = 0;
2399             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2400
2401             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2402             done.append(isNull);
2403
2404             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2405                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2406
2407             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2408             static const double one = 1;
2409             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2410             done.append(m_jit.jump());
2411             done.append(isFalse);
2412
2413             isUndefined.link(&m_jit);
2414             static const double NaN = PNaN;
2415             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2416             done.append(m_jit.jump());
2417
2418             isNumber.link(&m_jit);
2419         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2420             typeCheck(
2421                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2422                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2423         }
2424
2425         unboxDouble(op1GPR, tempGPR, resultFPR);
2426         done.append(m_jit.jump());
2427     
2428         isInteger.link(&m_jit);
2429         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2430         done.link(&m_jit);
2431 #else // USE(JSVALUE64) -> this is the 32_64 case
2432         FPRTemporary temp(this);
2433     
2434         GPRReg op1TagGPR = op1.tagGPR();
2435         GPRReg op1PayloadGPR = op1.payloadGPR();
2436         FPRReg tempFPR = temp.fpr();
2437         FPRReg resultFPR = result.fpr();
2438         JITCompiler::JumpList done;
2439     
2440         JITCompiler::Jump isInteger = m_jit.branch32(
2441             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2442
2443         if (node->child1().useKind() == NotCellUse) {
2444             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2445             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2446
2447             static const double zero = 0;
2448             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2449
2450             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2451             done.append(isNull);
2452
2453             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2454
2455             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2456             static const double one = 1;
2457             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2458             done.append(m_jit.jump());
2459             done.append(isFalse);
2460
2461             isUndefined.link(&m_jit);
2462             static const double NaN = PNaN;
2463             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2464             done.append(m_jit.jump());
2465
2466             isNumber.link(&m_jit);
2467         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2468             typeCheck(
2469                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2470                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2471         }
2472
2473         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2474         done.append(m_jit.jump());
2475     
2476         isInteger.link(&m_jit);
2477         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2478         done.link(&m_jit);
2479 #endif // USE(JSVALUE64)
2480     
2481         doubleResult(resultFPR, node);
2482         return;
2483     }
2484         
2485 #if USE(JSVALUE64)
2486     case Int52RepUse: {
2487         SpeculateStrictInt52Operand value(this, node->child1());
2488         FPRTemporary result(this);
2489         
2490         GPRReg valueGPR = value.gpr();
2491         FPRReg resultFPR = result.fpr();
2492
2493         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2494         
2495         doubleResult(resultFPR, node);
2496         return;
2497     }
2498 #endif // USE(JSVALUE64)
2499         
2500     default:
2501         RELEASE_ASSERT_NOT_REACHED();
2502         return;
2503     }
2504 }
2505
2506 void SpeculativeJIT::compileValueRep(Node* node)
2507 {
2508     switch (node->child1().useKind()) {
2509     case DoubleRepUse: {
2510         SpeculateDoubleOperand value(this, node->child1());
2511         JSValueRegsTemporary result(this);
2512         
2513         FPRReg valueFPR = value.fpr();
2514         JSValueRegs resultRegs = result.regs();
2515         
2516         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2517         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2518         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2519         // local was purified.
2520         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2521             m_jit.purifyNaN(valueFPR);
2522
2523         boxDouble(valueFPR, resultRegs);
2524         
2525         jsValueResult(resultRegs, node);
2526         return;
2527     }
2528         
2529 #if USE(JSVALUE64)
2530     case Int52RepUse: {
2531         SpeculateStrictInt52Operand value(this, node->child1());
2532         GPRTemporary result(this);
2533         
2534         GPRReg valueGPR = value.gpr();
2535         GPRReg resultGPR = result.gpr();
2536         
2537         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2538         
2539         jsValueResult(resultGPR, node);
2540         return;
2541     }
2542 #endif // USE(JSVALUE64)
2543         
2544     default:
2545         RELEASE_ASSERT_NOT_REACHED();
2546         return;
2547     }
2548 }
2549
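     // Used for the constant-value path of clamped byte stores: bias by 0.5 (the caller truncates),
     // then clamp to [0, 255]. The !(d > 0) test also maps NaN to 0.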
2550 static double clampDoubleToByte(double d)
2551 {
2552     d += 0.5;
2553     if (!(d > 0))
2554         d = 0;
2555     else if (d > 255)
2556         d = 255;
2557     return d;
2558 }
2559
2560 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2561 {
2562     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2563     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2564     jit.xorPtr(result, result);
2565     MacroAssembler::Jump clamped = jit.jump();
2566     tooBig.link(&jit);
2567     jit.move(JITCompiler::TrustedImm32(255), result);
2568     clamped.link(&jit);
2569     inBounds.link(&jit);
2570 }
2571
2572 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2573 {
2574     // Unordered compare so we pick up NaN
2575     static const double zero = 0;
2576     static const double byteMax = 255;
2577     static const double half = 0.5;
2578     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2579     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2580     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2581     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2582     
2583     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2584     // FIXME: This should probably just use a floating point round!
2585     // https://bugs.webkit.org/show_bug.cgi?id=72054
2586     jit.addDouble(source, scratch);
2587     jit.truncateDoubleToInt32(scratch, result);   
2588     MacroAssembler::Jump truncatedInt = jit.jump();
2589     
2590     tooSmall.link(&jit);
2591     jit.xorPtr(result, result);
2592     MacroAssembler::Jump zeroed = jit.jump();
2593     
2594     tooBig.link(&jit);
2595     jit.move(JITCompiler::TrustedImm32(255), result);
2596     
2597     truncatedInt.link(&jit);
2598     zeroed.link(&jit);
2599
2600 }
2601
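     // Returns an unset Jump when the bounds check can be omitted entirely: PutByValAlias has already
     // been checked, and a constant index that is provably within a foldable view's length needs no
     // check at all.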
2602 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2603 {
2604     if (node->op() == PutByValAlias)
2605         return JITCompiler::Jump();
2606     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2607         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2608     if (view) {
2609         uint32_t length = view->length();
2610         Node* indexNode = m_jit.graph().child(node, 1).node();
2611         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2612             return JITCompiler::Jump();
2613         return m_jit.branch32(
2614             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2615     }
2616     return m_jit.branch32(
2617         MacroAssembler::AboveOrEqual, indexGPR,
2618         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2619 }
2620
2621 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2622 {
2623     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2624     if (!jump.isSet())
2625         return;
2626     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2627 }
2628
2629 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2630 {
2631     JITCompiler::Jump done;
2632     if (outOfBounds.isSet()) {
2633         done = m_jit.jump();
2634         if (node->arrayMode().isInBounds())
2635             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2636         else {
2637             outOfBounds.link(&m_jit);
2638
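                 // Out-of-bounds accesses are tolerated here, but the view must still be backed by a
                 // buffer: a wasteful typed array with a null vector has presumably been neutered, so
                 // we OSR exit in that case.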
2639             JITCompiler::Jump notWasteful = m_jit.branch32(
2640                 MacroAssembler::NotEqual,
2641                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2642                 TrustedImm32(WastefulTypedArray));
2643
2644             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2645                 MacroAssembler::Zero,
2646                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2647             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2648             notWasteful.link(&m_jit);
2649         }
2650     }
2651     return done;
2652 }
2653
2654 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2655 {
2656     ASSERT(isInt(type));
2657     
2658     SpeculateCellOperand base(this, node->child1());
2659     SpeculateStrictInt32Operand property(this, node->child2());
2660     StorageOperand storage(this, node->child3());
2661
2662     GPRReg baseReg = base.gpr();
2663     GPRReg propertyReg = property.gpr();
2664     GPRReg storageReg = storage.gpr();
2665
2666     GPRTemporary result(this);
2667     GPRReg resultReg = result.gpr();
2668
2669     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2670
2671     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2672     switch (elementSize(type)) {
2673     case 1:
2674         if (isSigned(type))
2675             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2676         else
2677             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2678         break;
2679     case 2:
2680         if (isSigned(type))
2681             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2682         else
2683             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2684         break;
2685     case 4:
2686         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2687         break;
2688     default:
2689         CRASH();
2690     }
2691     if (elementSize(type) < 4 || isSigned(type)) {
2692         int32Result(resultReg, node);
2693         return;
2694     }
2695     
2696     ASSERT(elementSize(type) == 4 && !isSigned(type));
2697     if (node->shouldSpeculateInt32()) {
2698         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2699         int32Result(resultReg, node);
2700         return;
2701     }
2702     
2703 #if USE(JSVALUE64)
2704     if (node->shouldSpeculateAnyInt()) {
2705         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2706         strictInt52Result(resultReg, node);
2707         return;
2708     }
2709 #endif
2710     
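         // Otherwise return the uint32 as a double: after the signed int32-to-double conversion,
         // values with the top bit set are off by exactly 2^32, so add 2^32 back for those.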
2711     FPRTemporary fresult(this);
2712     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2713     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2714     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2715     positive.link(&m_jit);
2716     doubleResult(fresult.fpr(), node);
2717 }
2718
2719 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2720 {
2721     ASSERT(isInt(type));
2722     
2723     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2724     GPRReg storageReg = storage.gpr();
2725     
2726     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2727     
2728     GPRTemporary value;
2729     GPRReg valueGPR = InvalidGPRReg;
2730     
2731     if (valueUse->isConstant()) {
2732         JSValue jsValue = valueUse->asJSValue();
2733         if (!jsValue.isNumber()) {
2734             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2735             noResult(node);
2736             return;
2737         }
2738         double d = jsValue.asNumber();
2739         if (isClamped(type)) {
2740             ASSERT(elementSize(type) == 1);
2741             d = clampDoubleToByte(d);
2742         }
2743         GPRTemporary scratch(this);
2744         GPRReg scratchReg = scratch.gpr();
2745         m_jit.move(Imm32(toInt32(d)), scratchReg);
2746         value.adopt(scratch);
2747         valueGPR = scratchReg;
2748     } else {
2749         switch (valueUse.useKind()) {
2750         case Int32Use: {
2751             SpeculateInt32Operand valueOp(this, valueUse);
2752             GPRTemporary scratch(this);
2753             GPRReg scratchReg = scratch.gpr();
2754             m_jit.move(valueOp.gpr(), scratchReg);
2755             if (isClamped(type)) {
2756                 ASSERT(elementSize(type) == 1);
2757                 compileClampIntegerToByte(m_jit, scratchReg);
2758             }
2759             value.adopt(scratch);
2760             valueGPR = scratchReg;
2761             break;
2762         }
2763             
2764 #if USE(JSVALUE64)
2765         case Int52RepUse: {
2766             SpeculateStrictInt52Operand valueOp(this, valueUse);
2767             GPRTemporary scratch(this);
2768             GPRReg scratchReg = scratch.gpr();
2769             m_jit.move(valueOp.gpr(), scratchReg);
2770             if (isClamped(type)) {
2771                 ASSERT(elementSize(type) == 1);
2772                 MacroAssembler::Jump inBounds = m_jit.branch64(
2773                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2774                 MacroAssembler::Jump tooBig = m_jit.branch64(
2775                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2776                 m_jit.move(TrustedImm32(0), scratchReg);
2777                 MacroAssembler::Jump clamped = m_jit.jump();
2778                 tooBig.link(&m_jit);
2779                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2780                 clamped.link(&m_jit);
2781                 inBounds.link(&m_jit);
2782             }
2783             value.adopt(scratch);
2784             valueGPR = scratchReg;
2785             break;
2786         }
2787 #endif // USE(JSVALUE64)
2788             
2789         case DoubleRepUse: {
2790             if (isClamped(type)) {
2791                 ASSERT(elementSize(type) == 1);
2792                 SpeculateDoubleOperand valueOp(this, valueUse);
2793                 GPRTemporary result(this);
2794                 FPRTemporary floatScratch(this);
2795                 FPRReg fpr = valueOp.fpr();
2796                 GPRReg gpr = result.gpr();
2797                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2798                 value.adopt(result);
2799                 valueGPR = gpr;
2800             } else {
2801                 SpeculateDoubleOperand valueOp(this, valueUse);
2802                 GPRTemporary result(this);
2803                 FPRReg fpr = valueOp.fpr();
2804                 GPRReg gpr = result.gpr();
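                     // NaN stores 0. Otherwise try a fast double->int32 truncation and fall back
                     // to operationToInt32 on the slow path if the truncation fails.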
2805                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2806                 m_jit.xorPtr(gpr, gpr);
2807                 MacroAssembler::Jump fixed = m_jit.jump();
2808                 notNaN.link(&m_jit);
2809                 
2810                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2811                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2812                 
2813                 addSlowPathGenerator(slowPathCall(failed, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2814                 
2815                 fixed.link(&m_jit);
2816                 value.adopt(result);
2817                 valueGPR = gpr;
2818             }
2819             break;
2820         }
2821             
2822         default:
2823             RELEASE_ASSERT_NOT_REACHED();
2824             break;
2825         }
2826     }
2827     
2828     ASSERT_UNUSED(valueGPR, valueGPR != property);
2829     ASSERT(valueGPR != base);
2830     ASSERT(valueGPR != storageReg);
2831     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2832
2833     switch (elementSize(type)) {
2834     case 1:
2835         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2836         break;
2837     case 2:
2838         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2839         break;
2840     case 4:
2841         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2842         break;
2843     default:
2844         CRASH();
2845     }
2846
2847     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2848     if (done.isSet())
2849         done.link(&m_jit);
2850     noResult(node);
2851 }
2852
2853 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2854 {
2855     ASSERT(isFloat(type));
2856     
2857     SpeculateCellOperand base(this, node->child1());
2858     SpeculateStrictInt32Operand property(this, node->child2());
2859     StorageOperand storage(this, node->child3());
2860
2861     GPRReg baseReg = base.gpr();
2862     GPRReg propertyReg = property.gpr();
2863     GPRReg storageReg = storage.gpr();
2864
2865     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2866
2867     FPRTemporary result(this);
2868     FPRReg resultReg = result.fpr();
2869     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
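         // Float32 elements are widened to double after the load; Float64 elements are loaded directly.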
2870     switch (elementSize(type)) {
2871     case 4:
2872         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2873         m_jit.convertFloatToDouble(resultReg, resultReg);
2874         break;
2875     case 8: {
2876         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2877         break;
2878     }
2879     default:
2880         RELEASE_ASSERT_NOT_REACHED();
2881     }
2882     
2883     doubleResult(resultReg, node);
2884 }
2885
2886 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2887 {
2888     ASSERT(isFloat(type));
2889     
2890     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2891     GPRReg storageReg = storage.gpr();
2892     
2893     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2894     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2895
2896     SpeculateDoubleOperand valueOp(this, valueUse);
2897     FPRTemporary scratch(this);
2898     FPRReg valueFPR = valueOp.fpr();
2899     FPRReg scratchFPR = scratch.fpr();
2900
2901     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2902     
2903     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2904     
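         // For Float32 arrays the double is narrowed to float in a scratch FPR before the store;
         // Float64 arrays store the value as-is.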
2905     switch (elementSize(type)) {
2906     case 4: {
2907         m_jit.moveDouble(valueFPR, scratchFPR);
2908         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2909         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2910         break;
2911     }
2912     case 8:
2913         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2914         break;
2915     default:
2916         RELEASE_ASSERT_NOT_REACHED();
2917     }
2918
2919     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2920     if (done.isSet())
2921         done.link(&m_jit);
2922     noResult(node);
2923 }
2924
2925 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2926 {
2927     // Check that prototype is an object.
2928     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2929     
2930     // Initialize scratchReg with the value being checked.
2931     m_jit.move(valueReg, scratchReg);
2932     
2933     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2934     MacroAssembler::Label loop(&m_jit);
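         // Proxies must go through the generic hasInstance path. Otherwise keep loading prototypes
         // until we either hit prototypeReg (match) or fall off the end of the chain (no match).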
2935     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2936         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2937     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2938     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2939     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2940 #if USE(JSVALUE64)
2941     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2942 #else
2943     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2944 #endif
2945     
2946     // No match - result is false.
2947 #if USE(JSVALUE64)
2948     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2949 #else
2950     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2951 #endif
2952     MacroAssembler::JumpList doneJumps; 
2953     doneJumps.append(m_jit.jump());
2954
2955     performDefaultHasInstance.link(&m_jit);
2956     silentSpillAllRegisters(scratchReg);
2957     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2958     silentFillAllRegisters(scratchReg);
2959     m_jit.exceptionCheck();
2960 #if USE(JSVALUE64)
2961     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2962 #endif
2963     doneJumps.append(m_jit.jump());
2964     
2965     isInstance.link(&m_jit);
2966 #if USE(JSVALUE64)
2967     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2968 #else
2969     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2970 #endif
2971     
2972     doneJumps.link(&m_jit);
2973 }
2974
2975 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2976 {
2977     SpeculateCellOperand base(this, node->child1());
2978
2979     GPRReg baseGPR = base.gpr();
2980
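         // Speculate that the base cell has the requested type-info flags: OSR exit if the masked bits are all zero.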
2981     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2982
2983     noResult(node);
2984 }
2985
2986 void SpeculativeJIT::compileInstanceOf(Node* node)
2987 {
2988     if (node->child1().useKind() == UntypedUse) {
2989         // The value might not be a cell, so speculate less aggressively. It may also
2990         // only be used once (i.e., by us), in which case we get zero benefit from
2991         // speculating any more aggressively than we absolutely need to.
2992         
2993         JSValueOperand value(this, node->child1());
2994         SpeculateCellOperand prototype(this, node->child2());
2995         GPRTemporary scratch(this);
2996         GPRTemporary scratch2(this);
2997         
2998         GPRReg prototypeReg = prototype.gpr();
2999         GPRReg scratchReg = scratch.gpr();
3000         GPRReg scratch2Reg = scratch2.gpr();
3001         
3002         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3003         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3004         moveFalseTo(scratchReg);
3005
3006         MacroAssembler::Jump done = m_jit.jump();
3007         
3008         isCell.link(&m_jit);
3009         
3010         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3011         
3012         done.link(&m_jit);
3013
3014         blessedBooleanResult(scratchReg, node);
3015         return;
3016     }
3017     
3018     SpeculateCellOperand value(this, node->child1());
3019     SpeculateCellOperand prototype(this, node->child2());
3020     
3021     GPRTemporary scratch(this);
3022     GPRTemporary scratch2(this);
3023     
3024     GPRReg valueReg = value.gpr();
3025     GPRReg prototypeReg = prototype.gpr();
3026     GPRReg scratchReg = scratch.gpr();
3027     GPRReg scratch2Reg = scratch2.gpr();
3028     
3029     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3030
3031     blessedBooleanResult(scratchReg, node);
3032 }
3033
3034 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3035 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3036 {
3037     Edge& leftChild = node->child1();
3038     Edge& rightChild = node->child2();
3039
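         // If either operand is statically known not to be a number, the snippet fast path
         // cannot succeed; call the slow-path operation directly.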
3040     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3041         JSValueOperand left(this, leftChild);
3042         JSValueOperand right(this, rightChild);
3043         JSValueRegs leftRegs = left.jsValueRegs();
3044         JSValueRegs rightRegs = right.jsValueRegs();
3045 #if USE(JSVALUE64)
3046         GPRTemporary result(this);
3047         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3048 #else
3049         GPRTemporary resultTag(this);
3050         GPRTemporary resultPayload(this);
3051         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3052 #endif
3053         flushRegisters();
3054         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3055         m_jit.exceptionCheck();
3056
3057         jsValueResult(resultRegs, node);
3058         return;
3059     }
3060
3061     Optional<JSValueOperand> left;
3062     Optional<JSValueOperand> right;
3063
3064     JSValueRegs leftRegs;
3065     JSValueRegs rightRegs;
3066
3067 #if USE(JSVALUE64)
3068     GPRTemporary result(this);
3069     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3070     GPRTemporary scratch(this);
3071     GPRReg scratchGPR = scratch.gpr();
3072 #else
3073     GPRTemporary resultTag(this);
3074     GPRTemporary resultPayload(this);
3075     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3076     GPRReg scratchGPR = resultTag.gpr();
3077 #endif
3078
3079     SnippetOperand leftOperand;
3080     SnippetOperand rightOperand;
3081
3082     // The snippet generator does not support both operands being constant. If the left
3083     // operand is already const, we'll ignore the right operand's constness.
3084     if (leftChild->isInt32Constant())
3085         leftOperand.setConstInt32(leftChild->asInt32());
3086     else if (rightChild->isInt32Constant())
3087         rightOperand.setConstInt32(rightChild->asInt32());
3088
3089     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3090
3091     if (!leftOperand.isConst()) {
3092         left = JSValueOperand(this, leftChild);
3093         leftRegs = left->jsValueRegs();
3094     }
3095     if (!rightOperand.isConst()) {
3096         right = JSValueOperand(this, rightChild);
3097         rightRegs = right->jsValueRegs();
3098     }
3099
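         // Emit the snippet fast path. On its slow path, spill registers, re-materialize any
         // constant operand into the result registers, and call the slow-path operation.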
3100     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3101     gen.generateFastPath(m_jit);
3102
3103     ASSERT(gen.didEmitFastPath());
3104     gen.endJumpList().append(m_jit.jump());
3105
3106     gen.slowPathJumpList().link(&m_jit);
3107     silentSpillAllRegisters(resultRegs);
3108
3109     if (leftOperand.isConst()) {
3110         leftRegs = resultRegs;
3111         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3112     } else if (rightOperand.isConst()) {
3113         rightRegs = resultRegs;
3114         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3115     }
3116
3117     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3118
3119     silentFillAllRegisters(resultRegs);
3120     m_jit.exceptionCheck();
3121
3122     gen.endJumpList().link(&m_jit);
3123     jsValueResult(resultRegs, node);
3124 }
3125
3126 void SpeculativeJIT::compileBitwiseOp(Node* node)
3127 {
3128     NodeType op = node->op();
3129     Edge& leftChild = node->child1();
3130     Edge& rightChild = node->child2();
3131
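         // Untyped operands go through the snippet-based helper above; otherwise both operands
         // are Int32 and a constant operand can be folded directly into the bit op.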
3132     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3133         switch (op) {
3134         case BitAnd:
3135             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3136             return;
3137         case BitOr:
3138             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3139             return;
3140         case BitXor:
3141             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3142             return;
3143         default:
3144             RELEASE_ASSERT_NOT_REACHED();
3145         }
3146     }
3147
3148     if (leftChild->isInt32Constant()) {
3149         SpeculateInt32Operand op2(this, rightChild);
3150         GPRTemporary result(this, Reuse, op2);
3151
3152         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3153
3154         int32Result(result.gpr(), node);
3155
3156     } else if (rightChild->isInt32Constant()) {
3157         SpeculateInt32Operand op1(this, leftChild);
3158         GPRTemporary result(this, Reuse, op1);
3159
3160         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3161
3162         int32Result(result.gpr(), node);
3163
3164     } else {
3165         SpeculateInt32Operand op1(this, leftChild);
3166         SpeculateInt32Operand op2(this, rightChild);
3167         GPRTemporary result(this, Reuse, op1, op2);
3168         
3169         GPRReg reg1 = op1.gpr();
3170         GPRReg reg2 = op2.gpr();
3171         bitOp(op, reg1, reg2, result.gpr());
3172         
3173         int32Result(result.gpr(), node);
3174     }
3175 }
3176
3177 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3178 {
3179     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3180         ? operationValueBitRShift : operationValueBitURShift;
3181     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3182         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3183
3184     Edge& leftChild = node->child1();
3185     Edge& rightChild = node->child2();
3186
3187     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3188         JSValueOperand left(this, leftChild);
3189         JSValueOperand right(this, rightChild);
3190         JSValueRegs leftRegs = left.jsValueRegs();
3191         JSValueRegs rightRegs = right.jsValueRegs();
3192 #if USE(JSVALUE64)
3193         GPRTemporary result(this);
3194         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3195 #else
3196         GPRTemporary resultTag(this);
3197         GPRTemporary resultPayload(this);
3198         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3199 #endif
3200         flushRegisters();
3201         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3202         m_jit.exceptionCheck();
3203
3204         jsValueResult(resultRegs, node);
3205         return;
3206     }
3207
3208     Optional<JSValueOperand> left;
3209     Optional<JSValueOperand> right;
3210
3211     JSValueRegs leftRegs;
3212     JSValueRegs rightRegs;
3213
3214     FPRTemporary leftNumber(this);
3215     FPRReg leftFPR = leftNumber.fpr();
3216
3217 #if USE(JSVALUE64)
3218     GPRTemporary result(this);
3219     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3220     GPRTemporary scratch(this);
3221     GPRReg scratchGPR = scratch.gpr();
3222     FPRReg scratchFPR = InvalidFPRReg;
3223 #else
3224     GPRTemporary resultTag(this);
3225     GPRTemporary resultPayload(this);
3226     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3227     GPRReg scratchGPR = resultTag.gpr();
3228     FPRTemporary fprScratch(this);
3229     FPRReg scratchFPR = fprScratch.fpr();
3230 #endif
3231
3232     SnippetOperand leftOperand;
3233     SnippetOperand rightOperand;
3234
3235     // The snippet generator does not support both operands being constant. If the left
3236     // operand is already const, we'll ignore the right operand's constness.
3237     if (leftChild->isInt32Constant())
3238         leftOperand.setConstInt32(leftChild->asInt32());
3239     else if (rightChild->isInt32Constant())
3240         rightOperand.setConstInt32(rightChild->asInt32());
3241
3242     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3243
3244     if (!leftOperand.isConst()) {
3245         left = JSValueOperand(this, leftChild);
3246         leftRegs = left->jsValueRegs();
3247     }
3248     if (!rightOperand.isConst()) {
3249         right = JSValueOperand(this, rightChild);
3250         rightRegs = right->jsValueRegs();
3251     }
3252
3253     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3254         leftFPR, scratchGPR, scratchFPR, shiftType);
3255     gen.generateFastPath(m_jit);
3256
3257     ASSERT(gen.didEmitFastPath());
3258     gen.endJumpList().append(m_jit.jump());
3259
3260     gen.slowPathJumpList().link(&m_jit);
3261     silentSpillAllRegisters(resultRegs);
3262
3263     if (leftOperand.isConst()) {
3264         leftRegs = resultRegs;
3265         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3266     } else if (rightOperand.isConst()) {
3267         rightRegs = resultRegs;
3268         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3269     }
3270
3271     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3272
3273     silentFillAllRegisters(resultRegs);
3274     m_jit.exceptionCheck();
3275
3276     gen.endJumpList().link(&m_jit);
3277     jsValueResult(resultRegs, node);
3278     return;
3279 }
3280
3281 void SpeculativeJIT::compileShiftOp(Node* node)
3282 {
3283     NodeType op = node->op();
3284     Edge& leftChild = node->child1();
3285     Edge& rightChild = node->child2();
3286
3287     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3288         switch (op) {
3289         case BitLShift:
3290             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3291             return;
3292         case BitRShift:
3293         case BitURShift:
3294             emitUntypedRightShiftBitOp(node);
3295             return;
3296         default:
3297             RELEASE_ASSERT_NOT_REACHED();
3298         }
3299     }
3300
3301     if (rightChild->isInt32Constant()) {
3302         SpeculateInt32Operand op1(this, leftChild);
3303         GPRTemporary result(this, Reuse, op1);
3304
3305         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3306
3307         int32Result(result.gpr(), node);
3308     } else {
3309         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3310         SpeculateInt32Operand op1(this, leftChild);
3311         SpeculateInt32Operand op2(this, rightChild);
3312         GPRTemporary result(this, Reuse, op1);
3313
3314         GPRReg reg1 = op1.gpr();
3315         GPRReg reg2 = op2.gpr();
3316         shiftOp(op, reg1, reg2, result.gpr());
3317
3318         int32Result(result.gpr(), node);
3319     }
3320 }
3321
3322 void SpeculativeJIT::compileValueAdd(Node* node)
3323 {
3324     Edge& leftChild = node->child1();
3325     Edge& rightChild = node->child2();
3326
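         // If either operand is known not to be a number, this add cannot be numeric;
         // call operationValueAddNotNumber directly.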
3327     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3328         JSValueOperand left(this, leftChild);
3329         JSValueOperand right(this, rightChild);
3330         JSValueRegs leftRegs = left.jsValueRegs();
3331         JSValueRegs rightRegs = right.jsValueRegs();
3332 #if USE(JSVALUE64)
3333         GPRTemporary result(this);
3334         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3335 #else
3336         GPRTemporary resultTag(this);
3337         GPRTemporary resultPayload(this);
3338         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3339 #endif
3340         flushRegisters();
3341         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3342         m_jit.exceptionCheck();
3343     
3344         jsValueResult(resultRegs, node);
3345         return;
3346     }
3347
3348 #if USE(JSVALUE64)
3349     bool needsScratchGPRReg = true;
3350     bool needsScratchFPRReg = false;
3351 #else
3352     bool needsScratchGPRReg = true;
3353     bool needsScratchFPRReg = true;
3354 #endif
3355
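         // Otherwise compile ValueAdd as a JITAddIC via the shared math-IC path below,
         // with operationValueAddOptimize as the repatching slow path.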
3356     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC();
3357     auto repatchingFunction = operationValueAddOptimize;
3358     auto nonRepatchingFunction = operationValueAdd;
3359     
3360     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3361 }
3362
3363 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3364 void SpeculativeJIT::compileMathIC(Node* node, JITMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3365 {
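         // Shared math-IC compilation: set up operand and scratch registers, ask the IC to
         // generate an inline fast path, and register a slow-path generator that calls either
         // the repatching function (so the IC can regenerate itself) or the non-repatching
         // function. If no inline code could be generated, flush and call the non-repatching
         // function directly.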
3366     Edge& leftChild = node->child1();
3367     Edge& rightChild = node->child2();
3368
3369     Optional<JSValueOperand> left;
3370     Optional<JSValueOperand> right;
3371
3372     JSValueRegs leftRegs;
3373     JSValueRegs rightRegs;
3374
3375     FPRTemporary leftNumber(this);
3376     FPRTemporary rightNumber(this);
3377     FPRReg leftFPR = leftNumber.fpr();
3378     FPRReg rightFPR = rightNumber.fpr();
3379
3380     GPRReg scratchGPR = InvalidGPRReg;
3381     FPRReg scratchFPR = InvalidFPRReg;
3382
3383     Optional<FPRTemporary> fprScratch;
3384     if (needsScratchFPRReg) {
3385         fprScratch = FPRTemporary(this);
3386         scratchFPR = fprScratch->fpr();
3387     }
3388
3389 #if USE(JSVALUE64)
3390     Optional<GPRTemporary> gprScratch;
3391     if (needsScratchGPRReg) {
3392         gprScratch = GPRTemporary(this);
3393         scratchGPR = gprScratch->gpr();
3394     }
3395     GPRTemporary result(this);
3396     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3397 #else
3398     GPRTemporary resultTag(this);
3399     GPRTemporary resultPayload(this);
3400     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3401     if (needsScratchGPRReg)
3402         scratchGPR = resultRegs.tagGPR();
3403 #endif
3404
3405     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3406     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3407
3408     // The snippet generator does not support both operands being constant. If the left
3409     // operand is already const, we'll ignore the right operand's constness.
3410     if (leftChild->isInt32Constant())
3411         leftOperand.setConstInt32(leftChild->asInt32());
3412     else if (rightChild->isInt32Constant())
3413         rightOperand.setConstInt32(rightChild->asInt32());
3414
3415     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3416
3417     if (!mathIC->isLeftOperandValidConstant()) {
3418         left = JSValueOperand(this, leftChild);
3419         leftRegs = left->jsValueRegs();
3420     }
3421     if (!mathIC->isRightOperandValidConstant()) {
3422         right = JSValueOperand(this, rightChild);
3423         rightRegs = right->jsValueRegs();
3424     }
3425
3426     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3427     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3428     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR, arithProfile);
3429
3430     bool shouldEmitProfiling = false;
3431     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3432
3433     if (generatedInline) {
3434         ASSERT(!addICGenerationState->slowPathJumps.empty());
3435
3436         Vector<SilentRegisterSavePlan> savePlans;
3437         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3438
3439         auto done = m_jit.label();
3440
3441         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3442             addICGenerationState->slowPathJumps.link(&m_jit);
3443             addICGenerationState->slowPathStart = m_jit.label();
3444
3445             silentSpill(savePlans);
3446
3447             auto innerLeftRegs = leftRegs;
3448             auto innerRightRegs = rightRegs;
3449             if (mathIC->isLeftOperandValidConstant()) {
3450                 innerLeftRegs = resultRegs;
3451                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3452             } else if (mathIC->isRightOperandValidConstant()) {
3453                 innerRightRegs = resultRegs;
3454                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3455             }
3456
3457             if (addICGenerationState->shouldSlowPathRepatch)
3458                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3459             else
3460                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3461
3462             silentFill(savePlans);
3463             m_jit.exceptionCheck();
3464             m_jit.jump().linkTo(done, &m_jit);
3465
3466             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3467                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3468             });
3469         });
3470     } else {
3471         if (mathIC->isLeftOperandValidConstant()) {
3472             left = JSValueOperand(this, leftChild);
3473             leftRegs = left->jsValueRegs();
3474         } else if (mathIC->isRightOperandValidConstant()) {
3475             right = JSValueOperand(this, rightChild);
3476             rightRegs = right->jsValueRegs();
3477         }
3478
3479         flushRegisters();
3480         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3481         m_jit.exceptionCheck();
3482     }
3483
3484     jsValueResult(resultRegs, node);
3485     return;
3486 }
3487
3488 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3489 {
3490     // We could do something smarter here, but this case is currently super rare and,
3491     // unless Symbol.hasInstance becomes popular, will likely remain that way.
3492
3493     JSValueOperand value(this, node->child1());
3494     SpeculateCellOperand constructor(this, node->child2());
3495     JSValueOperand hasInstanceValue(this, node->child3());
3496     GPRTemporary result(this);
3497
3498     JSValueRegs valueRegs = value.jsValueRegs();
3499     GPRReg constructorGPR = constructor.gpr();
3500     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3501     GPRReg resultGPR = result.gpr();
3502
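         // There is no inline fast path here: unconditionally take the slow-path call to operationInstanceOfCustom.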
3503     MacroAssembler::Jump slowCase = m_jit.jump();
3504
3505     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3506
3507     unblessedBooleanResult(resultGPR, node);
3508 }
3509
3510 void SpeculativeJIT::compileIsJSArray(Node* node)
3511 {
3512     JSValueOperand value(this, node->child1());
3513     GPRFlushedCallResult result(this);
3514
3515     JSValueRegs valueRegs = value.jsValueRegs();
3516     GPRReg resultGPR = result.gpr();
3517
3518     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3519
3520     m_jit.compare8(JITCompiler::Equal,
3521         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3522         TrustedImm32(ArrayType),
3523         resultGPR);
3524     blessBoolean(resultGPR);
3525     JITCompiler::Jump done = m_jit.jump();
3526
3527     isNotCell.link(&m_jit);
3528     moveFalseTo(resultGPR);
3529
3530     done.link(&m_jit);
3531     blessedBooleanResult(resultGPR, node);
3532 }
3533
3534 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3535 {
3536     JSValueOperand value(this, node->child1());
3537     GPRFlushedCallResult result(this);
3538
3539     JSValueRegs valueRegs = value.jsValueRegs();
3540     GPRReg resultGPR = result.gpr();
3541
3542     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3543
3544     m_jit.compare8(JITCompiler::Equal,
3545         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3546         TrustedImm32(RegExpObjectType),
3547         resultGPR);
3548     blessBoolean(resultGPR);
3549     JITCompiler::Jump done = m_jit.jump();
3550
3551     isNotCell.link(&m_jit);
3552     moveFalseTo(resultGPR);
3553
3554     done.link(&m_jit);
3555     blessedBooleanResult(resultGPR, node);
3556 }
3557
3558 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3559 {
3560     JSValueOperand value(this, node->child1());
3561 #if USE(JSVALUE64)
3562     GPRTemporary result(this, Reuse, value);
3563 #else
3564     GPRTemporary result(this, Reuse, value, PayloadWord);
3565 #endif
3566
3567     JSValueRegs valueRegs = value.jsValueRegs();
3568     GPRReg resultGPR = result.gpr();
3569
3570     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3571
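         // The typed-array view types form a contiguous range of JSTypes, so one subtract and one
         // unsigned compare against (Float64ArrayType - Int8ArrayType) tests membership.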
3572     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3573     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3574     m_jit.compare32(JITCompiler::BelowOrEqual,
3575         resultGPR,
3576         TrustedImm32(Float64ArrayType - Int8ArrayType),
3577         resultGPR);
3578     blessBoolean(resultGPR);
3579     JITCompiler::Jump done = m_jit.jump();
3580
3581     isNotCell.link(&m_jit);
3582     moveFalseTo(resultGPR);
3583
3584     done.link(&m_jit);
3585     blessedBooleanResult(resultGPR, node);
3586 }
3587
3588 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3589 {
3590     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3591     JSValueOperand value(this, node->child1());
3592 #if USE(JSVALUE64)
3593     GPRTemporary result(this, Reuse, value);
3594 #else
3595     GPRTemporary result(this, Reuse, value, PayloadWord);
3596 #endif
3597
3598     JSValueRegs valueRegs = value.jsValueRegs();
3599     GPRReg resultGPR = result.gpr();
3600
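         // An object is returned unchanged; a non-cell or a non-object cell goes to
         // operationObjectConstructor on the slow path.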
3601     MacroAssembler::JumpList slowCases;
3602     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3603     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3604     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3605
3606     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3607     cellResult(resultGPR, node);
3608 }
3609
3610 void SpeculativeJIT::compileArithAdd(Node* node)
3611 {
3612     switch (node->binaryUseKind()) {
3613     case Int32Use: {
3614         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3615
3616         if (node->child2()->isInt32Constant()) {
3617             SpeculateInt32Operand op1(this, node->child1());
3618             GPRTemporary result(this, Reuse, op1);
3619
3620             GPRReg gpr1 = op1.gpr();
3621             int32_t imm2 = node->child2()->asInt32();
3622             GPRReg gprResult = result.gpr();
3623
3624             if (!shouldCheckOverflow(node->arithMode())) {
3625                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3626                 int32Result(gprResult, node);
3627                 return;
3628             }
3629
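                 // If the result register aliases the operand, record a SpeculationRecovery so an
                 // OSR exit can undo the add and recover the original operand value.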
3630             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3631             if (gpr1 == gprResult) {
3632                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3633                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3634             } else
3635                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3636
3637             int32Result(gprResult, node);
3638             return;
3639         }
3640                 
3641         SpeculateInt32Operand op1(this, node->child1());
3642         SpeculateInt32Operand op2(this, node->child2());
3643         GPRTemporary result(this, Reuse, op1, op2);
3644
3645         GPRReg gpr1 = op1.gpr();
3646         GPRReg gpr2 = op2.gpr();
3647         GPRReg gprResult = result.gpr();
3648
3649         if (!shouldCheckOverflow(node->arithMode()))
3650             m_jit.add32(gpr1, gpr2, gprResult);
3651         else {
3652             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3653                 
3654             if (gpr1 == gprResult)
3655                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3656             else if (gpr2 == gprResult)
3657                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3658             else
3659                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3660         }
3661
3662         int32Result(gprResult, node);
3663         return;
3664     }
3665         
3666 #if USE(JSVALUE64)
3667     case Int52RepUse: {
3668         ASSERT(shouldCheckOverflow(node->arithMode()));
3669         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3670
3671         // Will we need an overflow check? If we can prove that neither input can be
3672         // Int52, then the overflow check will not be necessary.
3673         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3674             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3675             SpeculateWhicheverInt52Operand op1(this, node->child1());
3676             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3677             GPRTemporary result(this, Reuse, op1);
3678             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3679             int52Result(result.gpr(), node, op1.format());
3680             return;
3681         }
3682         
3683         SpeculateInt52Operand op1(this, node->child1());
3684         SpeculateInt52Operand op2(this, node->child2());
3685         GPRTemporary result(this);
3686         m_jit.move(op1.gpr(), result.gpr());
3687         speculationCheck(
3688             Int52Overflow, JSValueRegs(), 0,
3689             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3690         int52Result(result.gpr(), node);
3691         return;
3692     }
3693 #endif // USE(JSVALUE64)
3694     
3695     case DoubleRepUse: {
3696         SpeculateDoubleOperand op1(this, node->child1());
3697         SpeculateDoubleOperand op2(this, node->child2());
3698         FPRTemporary result(this, op1, op2);
3699
3700         FPRReg reg1 = op1.fpr();
3701         FPRReg reg2 = op2.fpr();
3702         m_jit.addDouble(reg1, reg2, result.fpr());
3703
3704         doubleResult(result.fpr(), node);
3705         return;
3706     }
3707         
3708     default:
3709         RELEASE_ASSERT_NOT_REACHED();
3710         break;
3711     }
3712 }
3713
3714 void SpeculativeJIT::compileMakeRope(Node* node)
3715 {
3716     ASSERT(node->child1().useKind() == KnownStringUse);
3717     ASSERT(node->child2().useKind() == KnownStringUse);
3718     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3719     
3720     SpeculateCellOperand op1(this, node->child1());
3721     SpeculateCellOperand op2(this, node->child2());
3722     SpeculateCellOperand op3(this, node->child3());
3723     GPRTemporary result(this);
3724     GPRTemporary allocator(this);
3725     GPRTemporary scratch(this);
3726     
3727     GPRReg opGPRs[3];
3728     unsigned numOpGPRs;
3729     opGPRs[0] = op1.gpr();
3730     opGPRs[1] = op2.gpr();
3731     if (node->child3()) {
3732         opGPRs[2] = op3.gpr();
3733         numOpGPRs = 3;
3734     } else {
3735         opGPRs[2] = InvalidGPRReg;
3736         numOpGPRs = 2;
3737     }
3738     GPRReg resultGPR = result.gpr();
3739     GPRReg allocatorGPR = allocator.gpr();
3740     GPRReg scratchGPR = scratch.gpr();
3741     
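         // Inline-allocate the rope, store its fibers, AND the flags together (the rope is 8-bit
         // only if every fiber is), and add the lengths with overflow checks. Allocation failure
         // falls back to operationMakeRope2/3.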
3742     JITCompiler::JumpList slowPath;
3743     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3744     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3745     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3746         
3747     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3748     for (unsigned i = 0; i < numOpGPRs; ++i)
3749         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3750     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3751         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3752     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3753     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3754     if (!ASSERT_DISABLED) {
3755         JITCompiler::Jump ok = m_jit.branch32(
3756             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3757         m_jit.abortWithReason(DFGNegativeStringLength);
3758         ok.link(&m_jit);
3759     }
3760     for (unsigned i = 1; i < numOpGPRs; ++i) {
3761         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3762         speculationCheck(
3763             Uncountable, JSValueSource(), nullptr,
3764             m_jit.branchAdd32(
3765                 JITCompiler::Overflow,
3766                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3767     }
3768     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3769     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3770     if (!ASSERT_DISABLED) {
3771         JITCompiler::Jump ok = m_jit.branch32(
3772             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3773         m_jit.abortWithReason(DFGNegativeStringLength);
3774         ok.link(&m_jit);
3775     }
3776     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3777     
3778     switch (numOpGPRs) {
3779     case 2:
3780         addSlowPathGenerator(slowPathCall(
3781             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3782         break;
3783     case 3:
3784         addSlowPathGenerator(slowPathCall(
3785             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3786         break;
3787     default:
3788         RELEASE_ASSERT_NOT_REACHED();
3789         break;
3790     }
3791         
3792     cellResult(resultGPR, node);
3793 }
3794
3795 void SpeculativeJIT::compileArithClz32(Node* node)
3796 {
3797     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3798     SpeculateInt32Operand value(this, node->child1());
3799     GPRTemporary result(this, Reuse, value);
3800     GPRReg valueReg = value.gpr();
3801     GPRReg resultReg = result.gpr();
3802     m_jit.countLeadingZeros32(valueReg, resultReg);
3803     int32Result(resultReg, node);
3804 }
3805
3806 void SpeculativeJIT::compileArithSub(Node* node)
3807 {
3808     switch (node->binaryUseKind()) {
3809     case Int32Use: {
3810         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3811         
3812         if (node->child2()->isInt32Constant()) {
3813             SpeculateInt32Operand op1(this, node->child1());
3814             int32_t imm2 = node->child2()->asInt32();
3815             GPRTemporary result(this);
3816
3817             if (!shouldCheckOverflow(node->arithMode())) {
3818                 m_jit.move(op1.gpr(), result.gpr());
3819                 m_jit.sub32(Imm32(imm2), result.gpr());
3820             } else {
3821                 GPRTemporary scratch(this);
3822                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3823             }
3824
3825             int32Result(result.gpr(), node);
3826             return;
3827         }
3828             
3829         if (node->child1()->isInt32Constant()) {
3830             int32_t imm1 = node->child1()->asInt32();
3831             SpeculateInt32Operand op2(this, node->child2());
3832             GPRTemporary result(this);
3833                 
3834             m_jit.move(Imm32(imm1), result.gpr());
3835             if (!shouldCheckOverflow(node->arithMode()))
3836                 m_jit.sub32(op2.gpr(), result.gpr());
3837             else
3838                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3839                 
3840             int32Result(result.gpr(), node);
3841             return;
3842         }
3843             
3844         SpeculateInt32Operand op1(this, node->child1());
3845         SpeculateInt32Operand op2(this, node->child2());
3846         GPRTemporary result(this);
3847
3848         if (!shouldCheckOverflow(node->arithMode())) {
3849             m_jit.move(op1.gpr(), result.gpr());
3850             m_jit.sub32(op2.gpr(), result.gpr());
3851         } else
3852             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3853
3854         int32Result(result.gpr(), node);
3855         return;
3856     }
3857         
3858 #if USE(JSVALUE64)
3859     case Int52RepUse: {
3860         ASSERT(shouldCheckOverflow(node->arithMode()));
3861         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3862
3863         // Will we need an overflow check? If we can prove that neither input can be
3864         // Int52, then the overflow check will not be necessary.
3865         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3866             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3867             SpeculateWhicheverInt52Operand op1(this, node->child1());
3868             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3869             GPRTemporary result(this, Reuse, op1);
3870             m_jit.move(op1.gpr(), result.gpr());
3871             m_jit.sub64(op2.gpr(), result.gpr());
3872             int52Result(result.gpr(), node, op1.format());
3873             return;
3874         }
3875         
3876         SpeculateInt52Operand op1(this, node->child1());
3877         SpeculateInt52Operand op2(this, node->child2());
3878         GPRTemporary result(this);
3879         m_jit.move(op1.gpr(), result.gpr());
3880         speculationCheck(
3881             Int52Overflow, JSValueRegs(), 0,
3882             m_jit.branchSub64(