1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
82 {
83     IndexingType indexingType = structure->indexingType();
84     bool hasIndexingHeader = hasIndexedProperties(indexingType);
85
86     unsigned inlineCapacity = structure->inlineCapacity();
87     unsigned outOfLineCapacity = structure->outOfLineCapacity();
88     
89     GPRTemporary scratch(this);
90     GPRTemporary scratch2(this);
91     GPRReg scratchGPR = scratch.gpr();
92     GPRReg scratch2GPR = scratch2.gpr();
93
94     ASSERT(vectorLength >= numElements);
95     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
96     
97     JITCompiler::JumpList slowCases;
98
99     size_t size = 0;
100     if (hasIndexingHeader)
101         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102     size += outOfLineCapacity * sizeof(JSValue);
103
104     if (size) {
105         slowCases.append(
106             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
107         if (hasIndexingHeader)
108             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
109         else
110             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
111     } else
112         m_jit.move(TrustedImmPtr(0), storageGPR);
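    // Note (descriptive, added for clarity): at this point storageGPR holds the would-be
    // butterfly pointer. Roughly, in the Butterfly layout, out-of-line properties sit at
    // negative offsets below this pointer, the IndexingHeader (when present) sits immediately
    // before it, and indexed elements start at it and grow upward; the adjustments above line
    // the allocation up with that convention.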
113
114     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
118
119     if (hasIndexingHeader)
120         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121
122     // I want a slow path that also loads out the storage pointer, and that's
123     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124     // of work for a very small piece of functionality. :-/
125     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127         structure, vectorLength));
128
129     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
130 #if USE(JSVALUE64)
131         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132         for (unsigned i = numElements; i < vectorLength; ++i)
133             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
134 #else
135         EncodedValueDescriptor value;
136         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137         for (unsigned i = numElements; i < vectorLength; ++i) {
138             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
140         }
141 #endif
142     }
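    // The tail of the double vector (indices numElements..vectorLength-1) is filled with PNaN
    // above because, in ArrayWithDouble storage, PNaN is the representation used for a hole;
    // this way the unused slots read back as holes rather than as garbage doubles.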
143     
144     if (hasIndexingHeader)
145         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
146 }
147
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
149 {
150     if (inlineCallFrame && !inlineCallFrame->isVarargs())
151         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
152     else {
153         VirtualRegister argumentCountRegister;
154         if (!inlineCallFrame)
155             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
156         else
157             argumentCountRegister = inlineCallFrame->argumentCountRegister;
158         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
159         if (!includeThis)
160             m_jit.sub32(TrustedImm32(1), lengthGPR);
161     }
162 }
163
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
165 {
166     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
167 }
168
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
170 {
171     if (origin.inlineCallFrame) {
172         if (origin.inlineCallFrame->isClosureCall) {
173             m_jit.loadPtr(
174                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
175                 calleeGPR);
176         } else {
177             m_jit.move(
178                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
179                 calleeGPR);
180         }
181     } else
182         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
183 }
184
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
186 {
187     m_jit.addPtr(
188         TrustedImm32(
189             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190         GPRInfo::callFrameRegister, startGPR);
191 }
192
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
194 {
195     if (!doOSRExitFuzzing())
196         return MacroAssembler::Jump();
197     
198     MacroAssembler::Jump result;
199     
200     m_jit.pushToSave(GPRInfo::regT0);
201     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205     unsigned at = Options::fireOSRExitFuzzAt();
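    // With the counter already incremented above: fireOSRExitFuzzAtOrAfter=N forces every
    // speculation check from the Nth one onward to exit (Below is the "ok" case), while
    // fireOSRExitFuzzAt=N forces only the Nth check to exit (NotEqual is the "ok" case).
    // These are ordinary JSC options, typically set via the JSC_* option environment variables.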
206     if (at || atOrAfter) {
207         unsigned threshold;
208         MacroAssembler::RelationalCondition condition;
209         if (atOrAfter) {
210             threshold = atOrAfter;
211             condition = MacroAssembler::Below;
212         } else {
213             threshold = at;
214             condition = MacroAssembler::NotEqual;
215         }
216         MacroAssembler::Jump ok = m_jit.branch32(
217             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218         m_jit.popToRestore(GPRInfo::regT0);
219         result = m_jit.jump();
220         ok.link(&m_jit);
221     }
222     m_jit.popToRestore(GPRInfo::regT0);
223     
224     return result;
225 }
226
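// Roughly: a speculation check registers an OSR exit. jumpToFail is taken when the speculated
// condition fails, and the OSRExit record captures the exit kind, where the relevant value
// currently lives, and how to find a value profile for the node, so the exit machinery can
// reconstruct bytecode state and resume execution in a lower tier.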
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
228 {
229     if (!m_compileOkay)
230         return;
231     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232     if (fuzzJump.isSet()) {
233         JITCompiler::JumpList jumpsToFail;
234         jumpsToFail.append(fuzzJump);
235         jumpsToFail.append(jumpToFail);
236         m_jit.appendExitInfo(jumpsToFail);
237     } else
238         m_jit.appendExitInfo(jumpToFail);
239     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
243 {
244     if (!m_compileOkay)
245         return;
246     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247     if (fuzzJump.isSet()) {
248         JITCompiler::JumpList myJumpsToFail;
249         myJumpsToFail.append(jumpsToFail);
250         myJumpsToFail.append(fuzzJump);
251         m_jit.appendExitInfo(myJumpsToFail);
252     } else
253         m_jit.appendExitInfo(jumpsToFail);
254     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
255 }
256
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
258 {
259     if (!m_compileOkay)
260         return OSRExitJumpPlaceholder();
261     unsigned index = m_jit.jitCode()->osrExit.size();
262     m_jit.appendExitInfo();
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264     return OSRExitJumpPlaceholder(index);
265 }
266
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
268 {
269     return speculationCheck(kind, jsValueSource, nodeUse.node());
270 }
271
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
273 {
274     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
275 }
276
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
278 {
279     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
280 }
281
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
283 {
284     if (!m_compileOkay)
285         return;
286     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287     m_jit.appendExitInfo(jumpToFail);
288     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
294 }
295
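// Roughly: an invalidation point is an OSR exit with no branch of its own. We only record a
// watchpoint label (m_replacementSource); if a watchpoint later fires, a jump to the exit is
// spliced in at that label (jump replacement), so the fast path pays nothing until then.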
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
297 {
298     if (!m_compileOkay)
299         return;
300     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301     m_jit.jitCode()->appendOSRExit(OSRExit(
302         UncountableInvalidation, JSValueSource(),
303         m_jit.graph().methodOfGettingAValueProfileFor(node),
304         this, m_stream->size()));
305     info.m_replacementSource = m_jit.watchpointLabel();
306     ASSERT(info.m_replacementSource.isSet());
307     noResult(node);
308 }
309
310 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
311 {
312     if (!m_compileOkay)
313         return;
314     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
315     m_compileOkay = false;
316     if (verboseCompilationEnabled())
317         dataLog("Bailing compilation.\n");
318 }
319
320 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
321 {
322     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
323 }
324
325 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
326 {
327     ASSERT(needsTypeCheck(edge, typesPassedThrough));
328     m_interpreter.filter(edge, typesPassedThrough);
329     speculationCheck(exitKind, source, edge.node(), jumpToFail);
330 }
331
332 RegisterSet SpeculativeJIT::usedRegisters()
333 {
334     RegisterSet result;
335     
336     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
337         GPRReg gpr = GPRInfo::toRegister(i);
338         if (m_gprs.isInUse(gpr))
339             result.set(gpr);
340     }
341     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
342         FPRReg fpr = FPRInfo::toRegister(i);
343         if (m_fprs.isInUse(fpr))
344             result.set(fpr);
345     }
346     
347     result.merge(RegisterSet::stubUnavailableRegisters());
348     
349     return result;
350 }
351
352 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
353 {
354     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
355 }
356
357 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
358 {
359     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i) {
360         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), m_slowPathGenerators[i]->origin().semantic);
361         m_slowPathGenerators[i]->generate(this);
362     }
363 }
364
365 // On Windows we need to wrap fmod; on other platforms we can call it directly.
366 // On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
367 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
368 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
369 {
370     return fmod(x, y);
371 }
372 #else
373 #define fmodAsDFGOperation fmod
374 #endif
375
376 void SpeculativeJIT::clearGenerationInfo()
377 {
378     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
379         m_generationInfo[i] = GenerationInfo();
380     m_gprs = RegisterBank<GPRInfo>();
381     m_fprs = RegisterBank<FPRInfo>();
382 }
383
384 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
385 {
386     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
387     Node* node = info.node();
388     DataFormat registerFormat = info.registerFormat();
389     ASSERT(registerFormat != DataFormatNone);
390     ASSERT(registerFormat != DataFormatDouble);
391         
392     SilentSpillAction spillAction;
393     SilentFillAction fillAction;
394         
395     if (!info.needsSpill())
396         spillAction = DoNothingForSpill;
397     else {
398 #if USE(JSVALUE64)
399         ASSERT(info.gpr() == source);
400         if (registerFormat == DataFormatInt32)
401             spillAction = Store32Payload;
402         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
403             spillAction = StorePtr;
404         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
405             spillAction = Store64;
406         else {
407             ASSERT(registerFormat & DataFormatJS);
408             spillAction = Store64;
409         }
410 #elif USE(JSVALUE32_64)
411         if (registerFormat & DataFormatJS) {
412             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
413             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
414         } else {
415             ASSERT(info.gpr() == source);
416             spillAction = Store32Payload;
417         }
418 #endif
419     }
420         
421     if (registerFormat == DataFormatInt32) {
422         ASSERT(info.gpr() == source);
423         ASSERT(isJSInt32(info.registerFormat()));
424         if (node->hasConstant()) {
425             ASSERT(node->isInt32Constant());
426             fillAction = SetInt32Constant;
427         } else
428             fillAction = Load32Payload;
429     } else if (registerFormat == DataFormatBoolean) {
430 #if USE(JSVALUE64)
431         RELEASE_ASSERT_NOT_REACHED();
432 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
433         fillAction = DoNothingForFill;
434 #endif
435 #elif USE(JSVALUE32_64)
436         ASSERT(info.gpr() == source);
437         if (node->hasConstant()) {
438             ASSERT(node->isBooleanConstant());
439             fillAction = SetBooleanConstant;
440         } else
441             fillAction = Load32Payload;
442 #endif
443     } else if (registerFormat == DataFormatCell) {
444         ASSERT(info.gpr() == source);
445         if (node->hasConstant()) {
446             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
447             node->asCell(); // To get the assertion.
448             fillAction = SetCellConstant;
449         } else {
450 #if USE(JSVALUE64)
451             fillAction = LoadPtr;
452 #else
453             fillAction = Load32Payload;
454 #endif
455         }
456     } else if (registerFormat == DataFormatStorage) {
457         ASSERT(info.gpr() == source);
458         fillAction = LoadPtr;
459     } else if (registerFormat == DataFormatInt52) {
460         if (node->hasConstant())
461             fillAction = SetInt52Constant;
462         else if (info.spillFormat() == DataFormatInt52)
463             fillAction = Load64;
464         else if (info.spillFormat() == DataFormatStrictInt52)
465             fillAction = Load64ShiftInt52Left;
466         else if (info.spillFormat() == DataFormatNone)
467             fillAction = Load64;
468         else {
469             RELEASE_ASSERT_NOT_REACHED();
470 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
471             fillAction = Load64; // Make GCC happy.
472 #endif
473         }
474     } else if (registerFormat == DataFormatStrictInt52) {
475         if (node->hasConstant())
476             fillAction = SetStrictInt52Constant;
477         else if (info.spillFormat() == DataFormatInt52)
478             fillAction = Load64ShiftInt52Right;
479         else if (info.spillFormat() == DataFormatStrictInt52)
480             fillAction = Load64;
481         else if (info.spillFormat() == DataFormatNone)
482             fillAction = Load64;
483         else {
484             RELEASE_ASSERT_NOT_REACHED();
485 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
486             fillAction = Load64; // Make GCC happy.
487 #endif
488         }
489     } else {
490         ASSERT(registerFormat & DataFormatJS);
491 #if USE(JSVALUE64)
492         ASSERT(info.gpr() == source);
493         if (node->hasConstant()) {
494             if (node->isCellConstant())
495                 fillAction = SetTrustedJSConstant;
496             else
497                 fillAction = SetJSConstant;
498         } else if (info.spillFormat() == DataFormatInt32) {
499             ASSERT(registerFormat == DataFormatJSInt32);
500             fillAction = Load32PayloadBoxInt;
501         } else
502             fillAction = Load64;
503 #else
504         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
505         if (node->hasConstant())
506             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
507         else if (info.payloadGPR() == source)
508             fillAction = Load32Payload;
509         else { // Fill the Tag
510             switch (info.spillFormat()) {
511             case DataFormatInt32:
512                 ASSERT(registerFormat == DataFormatJSInt32);
513                 fillAction = SetInt32Tag;
514                 break;
515             case DataFormatCell:
516                 ASSERT(registerFormat == DataFormatJSCell);
517                 fillAction = SetCellTag;
518                 break;
519             case DataFormatBoolean:
520                 ASSERT(registerFormat == DataFormatJSBoolean);
521                 fillAction = SetBooleanTag;
522                 break;
523             default:
524                 fillAction = Load32Tag;
525                 break;
526             }
527         }
528 #endif
529     }
530         
531     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
532 }
533     
534 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
535 {
536     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
537     Node* node = info.node();
538     ASSERT(info.registerFormat() == DataFormatDouble);
539
540     SilentSpillAction spillAction;
541     SilentFillAction fillAction;
542         
543     if (!info.needsSpill())
544         spillAction = DoNothingForSpill;
545     else {
546         ASSERT(!node->hasConstant());
547         ASSERT(info.spillFormat() == DataFormatNone);
548         ASSERT(info.fpr() == source);
549         spillAction = StoreDouble;
550     }
551         
552 #if USE(JSVALUE64)
553     if (node->hasConstant()) {
554         node->asNumber(); // To get the assertion.
555         fillAction = SetDoubleConstant;
556     } else {
557         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
558         fillAction = LoadDouble;
559     }
560 #elif USE(JSVALUE32_64)
561     ASSERT(info.registerFormat() == DataFormatDouble);
562     if (node->hasConstant()) {
563         node->asNumber(); // To get the assertion.
564         fillAction = SetDoubleConstant;
565     } else
566         fillAction = LoadDouble;
567 #endif
568
569     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
570 }
571     
572 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
573 {
574     switch (plan.spillAction()) {
575     case DoNothingForSpill:
576         break;
577     case Store32Tag:
578         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
579         break;
580     case Store32Payload:
581         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
582         break;
583     case StorePtr:
584         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
585         break;
586 #if USE(JSVALUE64)
587     case Store64:
588         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
589         break;
590 #endif
591     case StoreDouble:
592         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
593         break;
594     default:
595         RELEASE_ASSERT_NOT_REACHED();
596     }
597 }
598     
599 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
600 {
601 #if USE(JSVALUE32_64)
602     UNUSED_PARAM(canTrample);
603 #endif
604     switch (plan.fillAction()) {
605     case DoNothingForFill:
606         break;
607     case SetInt32Constant:
608         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
609         break;
610 #if USE(JSVALUE64)
611     case SetInt52Constant:
612         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
613         break;
614     case SetStrictInt52Constant:
615         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
616         break;
617 #endif // USE(JSVALUE64)
618     case SetBooleanConstant:
619         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
620         break;
621     case SetCellConstant:
622         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
623         break;
624 #if USE(JSVALUE64)
625     case SetTrustedJSConstant:
626         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
627         break;
628     case SetJSConstant:
629         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
630         break;
631     case SetDoubleConstant:
632         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
633         m_jit.move64ToDouble(canTrample, plan.fpr());
634         break;
635     case Load32PayloadBoxInt:
636         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
637         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
638         break;
639     case Load32PayloadConvertToInt52:
640         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
641         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
642         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
643         break;
644     case Load32PayloadSignExtend:
645         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
646         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
647         break;
648 #else
649     case SetJSConstantTag:
650         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
651         break;
652     case SetJSConstantPayload:
653         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
654         break;
655     case SetInt32Tag:
656         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
657         break;
658     case SetCellTag:
659         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
660         break;
661     case SetBooleanTag:
662         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
663         break;
664     case SetDoubleConstant:
665         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
666         break;
667 #endif
668     case Load32Tag:
669         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
670         break;
671     case Load32Payload:
672         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673         break;
674     case LoadPtr:
675         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
676         break;
677 #if USE(JSVALUE64)
678     case Load64:
679         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
680         break;
681     case Load64ShiftInt52Right:
682         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
683         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
684         break;
685     case Load64ShiftInt52Left:
686         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
687         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
688         break;
689 #endif
690     case LoadDouble:
691         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
692         break;
693     default:
694         RELEASE_ASSERT_NOT_REACHED();
695     }
696 }
697     
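// Given the cell's indexing type byte in tempGPR, returns a jump that is taken when the
// indexing type is not what arrayMode wants. For example, for Array::Array with Int32Shape the
// IsArray bit and the Int32Shape bits must both match, so we mask with
// IsArray | IndexingShapeMask and compare against IsArray | shape.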
698 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
699 {
700     switch (arrayMode.arrayClass()) {
701     case Array::OriginalArray: {
702         CRASH();
703 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
704         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
705         return result;
706 #endif
707     }
708         
709     case Array::Array:
710         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
711         return m_jit.branch32(
712             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
713         
714     case Array::NonArray:
715     case Array::OriginalNonArray:
716         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
717         return m_jit.branch32(
718             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
719         
720     case Array::PossiblyArray:
721         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
722         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
723     }
724     
725     RELEASE_ASSERT_NOT_REACHED();
726     return JITCompiler::Jump();
727 }
728
729 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
730 {
731     JITCompiler::JumpList result;
732     
733     switch (arrayMode.type()) {
734     case Array::Int32:
735         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
736
737     case Array::Double:
738         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
739
740     case Array::Contiguous:
741         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
742
743     case Array::Undecided:
744         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
745
746     case Array::ArrayStorage:
747     case Array::SlowPutArrayStorage: {
748         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
749         
750         if (arrayMode.isJSArray()) {
751             if (arrayMode.isSlowPut()) {
752                 result.append(
753                     m_jit.branchTest32(
754                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
755                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
756                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
757                 result.append(
758                     m_jit.branch32(
759                         MacroAssembler::Above, tempGPR,
760                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
761                 break;
762             }
763             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
764             result.append(
765                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
766             break;
767         }
768         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
769         if (arrayMode.isSlowPut()) {
770             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
771             result.append(
772                 m_jit.branch32(
773                     MacroAssembler::Above, tempGPR,
774                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
775             break;
776         }
777         result.append(
778             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
779         break;
780     }
781     default:
782         CRASH();
783         break;
784     }
785     
786     return result;
787 }
788
789 void SpeculativeJIT::checkArray(Node* node)
790 {
791     ASSERT(node->arrayMode().isSpecific());
792     ASSERT(!node->arrayMode().doesConversion());
793     
794     SpeculateCellOperand base(this, node->child1());
795     GPRReg baseReg = base.gpr();
796     
797     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
798         noResult(m_currentNode);
799         return;
800     }
801     
802     const ClassInfo* expectedClassInfo = 0;
803     
804     switch (node->arrayMode().type()) {
805     case Array::AnyTypedArray:
806     case Array::String:
807         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
808         break;
809     case Array::Int32:
810     case Array::Double:
811     case Array::Contiguous:
812     case Array::Undecided:
813     case Array::ArrayStorage:
814     case Array::SlowPutArrayStorage: {
815         GPRTemporary temp(this);
816         GPRReg tempGPR = temp.gpr();
817         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
818         speculationCheck(
819             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
820             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
821         
822         noResult(m_currentNode);
823         return;
824     }
825     case Array::DirectArguments:
826         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
827         noResult(m_currentNode);
828         return;
829     case Array::ScopedArguments:
830         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
831         noResult(m_currentNode);
832         return;
833     default:
834         speculateCellTypeWithoutTypeFiltering(
835             node->child1(), baseReg,
836             typeForTypedArrayType(node->arrayMode().typedArrayType()));
837         noResult(m_currentNode);
838         return;
839     }
840     
841     RELEASE_ASSERT(expectedClassInfo);
842     
843     GPRTemporary temp(this);
844     GPRTemporary temp2(this);
845     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
846     speculationCheck(
847         BadType, JSValueSource::unboxedCell(baseReg), node,
848         m_jit.branchPtr(
849             MacroAssembler::NotEqual,
850             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
851             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
852     
853     noResult(m_currentNode);
854 }
855
856 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
857 {
858     ASSERT(node->arrayMode().doesConversion());
859     
860     GPRTemporary temp(this);
861     GPRTemporary structure;
862     GPRReg tempGPR = temp.gpr();
863     GPRReg structureGPR = InvalidGPRReg;
864     
865     if (node->op() != ArrayifyToStructure) {
866         GPRTemporary realStructure(this);
867         structure.adopt(realStructure);
868         structureGPR = structure.gpr();
869     }
870         
871     // We can skip all that comes next if we already have array storage.
872     MacroAssembler::JumpList slowPath;
873     
874     if (node->op() == ArrayifyToStructure) {
875         slowPath.append(m_jit.branchWeakStructure(
876             JITCompiler::NotEqual,
877             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
878             node->structure()));
879     } else {
880         m_jit.load8(
881             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
882         
883         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
884     }
885     
886     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
887         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
888     
889     noResult(m_currentNode);
890 }
891
892 void SpeculativeJIT::arrayify(Node* node)
893 {
894     ASSERT(node->arrayMode().isSpecific());
895     
896     SpeculateCellOperand base(this, node->child1());
897     
898     if (!node->child2()) {
899         arrayify(node, base.gpr(), InvalidGPRReg);
900         return;
901     }
902     
903     SpeculateInt32Operand property(this, node->child2());
904     
905     arrayify(node, base.gpr(), property.gpr());
906 }
907
908 GPRReg SpeculativeJIT::fillStorage(Edge edge)
909 {
910     VirtualRegister virtualRegister = edge->virtualRegister();
911     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
912     
913     switch (info.registerFormat()) {
914     case DataFormatNone: {
915         if (info.spillFormat() == DataFormatStorage) {
916             GPRReg gpr = allocate();
917             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
918             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
919             info.fillStorage(*m_stream, gpr);
920             return gpr;
921         }
922         
923         // Must be a cell; fill it as a cell and then return the pointer.
924         return fillSpeculateCell(edge);
925     }
926         
927     case DataFormatStorage: {
928         GPRReg gpr = info.gpr();
929         m_gprs.lock(gpr);
930         return gpr;
931     }
932         
933     default:
934         return fillSpeculateCell(edge);
935     }
936 }
937
938 void SpeculativeJIT::useChildren(Node* node)
939 {
940     if (node->flags() & NodeHasVarArgs) {
941         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
942             if (!!m_jit.graph().m_varArgChildren[childIdx])
943                 use(m_jit.graph().m_varArgChildren[childIdx]);
944         }
945     } else {
946         Edge child1 = node->child1();
947         if (!child1) {
948             ASSERT(!node->child2() && !node->child3());
949             return;
950         }
951         use(child1);
952         
953         Edge child2 = node->child2();
954         if (!child2) {
955             ASSERT(!node->child3());
956             return;
957         }
958         use(child2);
959         
960         Edge child3 = node->child3();
961         if (!child3)
962             return;
963         use(child3);
964     }
965 }
966
967 void SpeculativeJIT::compileTryGetById(Node* node)
968 {
969     switch (node->child1().useKind()) {
970     case CellUse: {
971         SpeculateCellOperand base(this, node->child1());
972         JSValueRegsTemporary result(this, Reuse, base);
973
974         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
975         JSValueRegs resultRegs = result.regs();
976
977         base.use();
978
979         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), DontSpill, AccessType::GetPure);
980
981         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
982         break;
983     }
984
985     case UntypedUse: {
986         JSValueOperand base(this, node->child1());
987         JSValueRegsTemporary result(this, Reuse, base);
988
989         JSValueRegs baseRegs = base.jsValueRegs();
990         JSValueRegs resultRegs = result.regs();
991
992         base.use();
993
994         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
995
996         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
997
998         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
999         break;
1000     }
1001
1002     default:
1003         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1004         break;
1005     } 
1006 }
1007
1008 void SpeculativeJIT::compileIn(Node* node)
1009 {
1010     SpeculateCellOperand base(this, node->child2());
1011     GPRReg baseGPR = base.gpr();
1012     
1013     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1014         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1015             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1016             
1017             GPRTemporary result(this);
1018             GPRReg resultGPR = result.gpr();
1019
1020             use(node->child1());
1021             
1022             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1023             MacroAssembler::Label done = m_jit.label();
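            // (Rough description of the In inline-cache plumbing.) The patchable jump above is
            // initially linked to the operationInOptimize slow path below; the runtime may later
            // repatch it to point at a generated stub, and `done` marks where the repatched fast
            // path rejoins the main code. InRecord/StructureStubInfo carry what repatching needs.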
1024             
1025             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1026             // we can cast it to const AtomicStringImpl* safely.
1027             auto slowPath = slowPathCall(
1028                 jump.m_jump, this, operationInOptimize,
1029                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1030                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1031             
1032             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1033             stubInfo->codeOrigin = node->origin.semantic;
1034             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1035             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1036 #if USE(JSVALUE32_64)
1037             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1038             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1039 #endif
1040             stubInfo->patch.usedRegisters = usedRegisters();
1041
1042             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1043             addSlowPathGenerator(WTFMove(slowPath));
1044
1045             base.use();
1046
1047             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1048             return;
1049         }
1050     }
1051
1052     JSValueOperand key(this, node->child1());
1053     JSValueRegs regs = key.jsValueRegs();
1054         
1055     GPRFlushedCallResult result(this);
1056     GPRReg resultGPR = result.gpr();
1057         
1058     base.use();
1059     key.use();
1060         
1061     flushRegisters();
1062     callOperation(
1063         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1064         baseGPR, regs);
1065     m_jit.exceptionCheck();
1066     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1067 }
1068
1069 void SpeculativeJIT::compileDeleteById(Node* node)
1070 {
1071     JSValueOperand value(this, node->child1());
1072     GPRFlushedCallResult result(this);
1073
1074     JSValueRegs valueRegs = value.jsValueRegs();
1075     GPRReg resultGPR = result.gpr();
1076
1077     value.use();
1078
1079     flushRegisters();
1080     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1081     m_jit.exceptionCheck();
1082
1083     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1084 }
1085
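// Peephole optimization: if this compare is immediately followed by a Branch that is its only
// user, fuse the two into a single compare-and-branch and advance past the branch. Returns true
// when the branch was fused (no standalone boolean result is produced), false otherwise.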
1086 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1087 {
1088     unsigned branchIndexInBlock = detectPeepHoleBranch();
1089     if (branchIndexInBlock != UINT_MAX) {
1090         Node* branchNode = m_block->at(branchIndexInBlock);
1091
1092         ASSERT(node->adjustedRefCount() == 1);
1093         
1094         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1095     
1096         m_indexInBlock = branchIndexInBlock;
1097         m_currentNode = branchNode;
1098         
1099         return true;
1100     }
1101     
1102     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1103     
1104     return false;
1105 }
1106
1107 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1108 {
1109     unsigned branchIndexInBlock = detectPeepHoleBranch();
1110     if (branchIndexInBlock != UINT_MAX) {
1111         Node* branchNode = m_block->at(branchIndexInBlock);
1112
1113         ASSERT(node->adjustedRefCount() == 1);
1114         
1115         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1116     
1117         m_indexInBlock = branchIndexInBlock;
1118         m_currentNode = branchNode;
1119         
1120         return true;
1121     }
1122     
1123     nonSpeculativeNonPeepholeStrictEq(node, invert);
1124     
1125     return false;
1126 }
1127
1128 static const char* dataFormatString(DataFormat format)
1129 {
1130     // These values correspond to the DataFormat enum.
1131     const char* strings[] = {
1132         "[  ]",
1133         "[ i]",
1134         "[ d]",
1135         "[ c]",
1136         "Err!",
1137         "Err!",
1138         "Err!",
1139         "Err!",
1140         "[J ]",
1141         "[Ji]",
1142         "[Jd]",
1143         "[Jc]",
1144         "Err!",
1145         "Err!",
1146         "Err!",
1147         "Err!",
1148     };
1149     return strings[format];
1150 }
1151
1152 void SpeculativeJIT::dump(const char* label)
1153 {
1154     if (label)
1155         dataLogF("<%s>\n", label);
1156
1157     dataLogF("  gprs:\n");
1158     m_gprs.dump();
1159     dataLogF("  fprs:\n");
1160     m_fprs.dump();
1161     dataLogF("  VirtualRegisters:\n");
1162     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1163         GenerationInfo& info = m_generationInfo[i];
1164         if (info.alive())
1165             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1166         else
1167             dataLogF("    % 3d:[__][__]", i);
1168         if (info.registerFormat() == DataFormatDouble)
1169             dataLogF(":fpr%d\n", info.fpr());
1170         else if (info.registerFormat() != DataFormatNone
1171 #if USE(JSVALUE32_64)
1172             && !(info.registerFormat() & DataFormatJS)
1173 #endif
1174             ) {
1175             ASSERT(info.gpr() != InvalidGPRReg);
1176             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1177         } else
1178             dataLogF("\n");
1179     }
1180     if (label)
1181         dataLogF("</%s>\n", label);
1182 }
1183
1184 GPRTemporary::GPRTemporary()
1185     : m_jit(0)
1186     , m_gpr(InvalidGPRReg)
1187 {
1188 }
1189
1190 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1191     : m_jit(jit)
1192     , m_gpr(InvalidGPRReg)
1193 {
1194     m_gpr = m_jit->allocate();
1195 }
1196
1197 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1198     : m_jit(jit)
1199     , m_gpr(InvalidGPRReg)
1200 {
1201     m_gpr = m_jit->allocate(specific);
1202 }
1203
1204 #if USE(JSVALUE32_64)
1205 GPRTemporary::GPRTemporary(
1206     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1207     : m_jit(jit)
1208     , m_gpr(InvalidGPRReg)
1209 {
1210     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1211         m_gpr = m_jit->reuse(op1.gpr(which));
1212     else
1213         m_gpr = m_jit->allocate();
1214 }
1215 #endif // USE(JSVALUE32_64)
1216
1217 JSValueRegsTemporary::JSValueRegsTemporary() { }
1218
1219 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1220 #if USE(JSVALUE64)
1221     : m_gpr(jit)
1222 #else
1223     : m_payloadGPR(jit)
1224     , m_tagGPR(jit)
1225 #endif
1226 {
1227 }
1228
1229 #if USE(JSVALUE64)
1230 template<typename T>
1231 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1232     : m_gpr(jit, Reuse, operand)
1233 {
1234 }
1235 #else
1236 template<typename T>
1237 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1238 {
1239     if (resultWord == PayloadWord) {
1240         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1241         m_tagGPR = GPRTemporary(jit);
1242     } else {
1243         m_payloadGPR = GPRTemporary(jit);
1244         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1245     }
1246 }
1247 #endif
1248
1249 #if USE(JSVALUE64)
1250 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1251 {
1252     m_gpr = GPRTemporary(jit, Reuse, operand);
1253 }
1254 #else
1255 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1256 {
1257     if (jit->canReuse(operand.node())) {
1258         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1259         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1260     } else {
1261         m_payloadGPR = GPRTemporary(jit);
1262         m_tagGPR = GPRTemporary(jit);
1263     }
1264 }
1265 #endif
1266
1267 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1268
1269 JSValueRegs JSValueRegsTemporary::regs()
1270 {
1271 #if USE(JSVALUE64)
1272     return JSValueRegs(m_gpr.gpr());
1273 #else
1274     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1275 #endif
1276 }
1277
1278 void GPRTemporary::adopt(GPRTemporary& other)
1279 {
1280     ASSERT(!m_jit);
1281     ASSERT(m_gpr == InvalidGPRReg);
1282     ASSERT(other.m_jit);
1283     ASSERT(other.m_gpr != InvalidGPRReg);
1284     m_jit = other.m_jit;
1285     m_gpr = other.m_gpr;
1286     other.m_jit = 0;
1287     other.m_gpr = InvalidGPRReg;
1288 }
1289
1290 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1291     : m_jit(jit)
1292     , m_fpr(InvalidFPRReg)
1293 {
1294     m_fpr = m_jit->fprAllocate();
1295 }
1296
1297 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1298     : m_jit(jit)
1299     , m_fpr(InvalidFPRReg)
1300 {
1301     if (m_jit->canReuse(op1.node()))
1302         m_fpr = m_jit->reuse(op1.fpr());
1303     else
1304         m_fpr = m_jit->fprAllocate();
1305 }
1306
1307 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1308     : m_jit(jit)
1309     , m_fpr(InvalidFPRReg)
1310 {
1311     if (m_jit->canReuse(op1.node()))
1312         m_fpr = m_jit->reuse(op1.fpr());
1313     else if (m_jit->canReuse(op2.node()))
1314         m_fpr = m_jit->reuse(op2.fpr());
1315     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1316         m_fpr = m_jit->reuse(op1.fpr());
1317     else
1318         m_fpr = m_jit->fprAllocate();
1319 }
1320
1321 #if USE(JSVALUE32_64)
1322 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1323     : m_jit(jit)
1324     , m_fpr(InvalidFPRReg)
1325 {
1326     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1327         m_fpr = m_jit->reuse(op1.fpr());
1328     else
1329         m_fpr = m_jit->fprAllocate();
1330 }
1331 #endif
1332
1333 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1334 {
1335     BasicBlock* taken = branchNode->branchData()->taken.block;
1336     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1337
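    // If the taken block is next in emission order, invert the condition and swap the targets so
    // the conditional branch goes to the other block and we simply fall through, instead of
    // emitting a conditional branch followed by a jump to the very next block.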
1338     if (taken == nextBlock()) {
1339         condition = MacroAssembler::invert(condition);
1340         std::swap(taken, notTaken);
1341     }
1342
1343     SpeculateDoubleOperand op1(this, node->child1());
1344     SpeculateDoubleOperand op2(this, node->child2());
1345     
1346     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1347     jump(notTaken);
1348 }
1349
1350 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1351 {
1352     BasicBlock* taken = branchNode->branchData()->taken.block;
1353     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1354
1355     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1356     
1357     if (taken == nextBlock()) {
1358         condition = MacroAssembler::NotEqual;
1359         BasicBlock* tmp = taken;
1360         taken = notTaken;
1361         notTaken = tmp;
1362     }
1363
1364     SpeculateCellOperand op1(this, node->child1());
1365     SpeculateCellOperand op2(this, node->child2());
1366     
1367     GPRReg op1GPR = op1.gpr();
1368     GPRReg op2GPR = op2.gpr();
1369     
1370     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1371         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1372             speculationCheck(
1373                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1374         }
1375         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1376             speculationCheck(
1377                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1378         }
1379     } else {
1380         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1381             speculationCheck(
1382                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1383                 m_jit.branchIfNotObject(op1GPR));
1384         }
1385         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1386             m_jit.branchTest8(
1387                 MacroAssembler::NonZero, 
1388                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1389                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1390
1391         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1392             speculationCheck(
1393                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1394                 m_jit.branchIfNotObject(op2GPR));
1395         }
1396         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1397             m_jit.branchTest8(
1398                 MacroAssembler::NonZero, 
1399                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1400                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1401     }
1402
1403     branchPtr(condition, op1GPR, op2GPR, taken);
1404     jump(notTaken);
1405 }
1406
1407 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1408 {
1409     BasicBlock* taken = branchNode->branchData()->taken.block;
1410     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1411
1412     // The branch instruction will branch to the taken block.
1413     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1414     if (taken == nextBlock()) {
1415         condition = JITCompiler::invert(condition);
1416         BasicBlock* tmp = taken;
1417         taken = notTaken;
1418         notTaken = tmp;
1419     }
1420
1421     if (node->child1()->isInt32Constant()) {
1422         int32_t imm = node->child1()->asInt32();
1423         SpeculateBooleanOperand op2(this, node->child2());
1424         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1425     } else if (node->child2()->isInt32Constant()) {
1426         SpeculateBooleanOperand op1(this, node->child1());
1427         int32_t imm = node->child2()->asInt32();
1428         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1429     } else {
1430         SpeculateBooleanOperand op1(this, node->child1());
1431         SpeculateBooleanOperand op2(this, node->child2());
1432         branch32(condition, op1.gpr(), op2.gpr(), taken);
1433     }
1434
1435     jump(notTaken);
1436 }
1437
1438 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1439 {
1440     BasicBlock* taken = branchNode->branchData()->taken.block;
1441     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1442
1443     // The branch instruction will branch to the taken block.
1444     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1445     if (taken == nextBlock()) {
1446         condition = JITCompiler::invert(condition);
1447         BasicBlock* tmp = taken;
1448         taken = notTaken;
1449         notTaken = tmp;
1450     }
1451
1452     if (node->child1()->isInt32Constant()) {
1453         int32_t imm = node->child1()->asInt32();
1454         SpeculateInt32Operand op2(this, node->child2());
1455         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1456     } else if (node->child2()->isInt32Constant()) {
1457         SpeculateInt32Operand op1(this, node->child1());
1458         int32_t imm = node->child2()->asInt32();
1459         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1460     } else {
1461         SpeculateInt32Operand op1(this, node->child1());
1462         SpeculateInt32Operand op2(this, node->child2());
1463         branch32(condition, op1.gpr(), op2.gpr(), taken);
1464     }
1465
1466     jump(notTaken);
1467 }
1468
1469 // Returns true if the compare is fused with a subsequent branch.
1470 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1471 {
1472     // Fused compare & branch.
1473     unsigned branchIndexInBlock = detectPeepHoleBranch();
1474     if (branchIndexInBlock != UINT_MAX) {
1475         Node* branchNode = m_block->at(branchIndexInBlock);
1476
1477         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1478         // so there can be no intervening nodes to also reference the compare.
1479         ASSERT(node->adjustedRefCount() == 1);
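             // Illustrative DFG shape this handles (hypothetical node numbers):
             //     @10: CompareLess(Int32:@a, Int32:@b)
             //     @11: Branch(Bool:@10, taken:#2, notTaken:#3)
             // Rather than materializing a boolean for @10 and then testing it, the
             // compare and the branch below are emitted as one conditional jump.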
1480
1481         if (node->isBinaryUseKind(Int32Use))
1482             compilePeepHoleInt32Branch(node, branchNode, condition);
1483 #if USE(JSVALUE64)
1484         else if (node->isBinaryUseKind(Int52RepUse))
1485             compilePeepHoleInt52Branch(node, branchNode, condition);
1486 #endif // USE(JSVALUE64)
1487         else if (node->isBinaryUseKind(DoubleRepUse))
1488             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1489         else if (node->op() == CompareEq) {
1490             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1491                 // Use non-peephole comparison, for now.
1492                 return false;
1493             }
1494             if (node->isBinaryUseKind(BooleanUse))
1495                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1496             else if (node->isBinaryUseKind(SymbolUse))
1497                 compilePeepHoleSymbolEquality(node, branchNode);
1498             else if (node->isBinaryUseKind(ObjectUse))
1499                 compilePeepHoleObjectEquality(node, branchNode);
1500             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1501                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1502             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1503                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1504             else if (!needsTypeCheck(node->child1(), SpecOther))
1505                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1506             else if (!needsTypeCheck(node->child2(), SpecOther))
1507                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1508             else {
1509                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1510                 return true;
1511             }
1512         } else {
1513             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1514             return true;
1515         }
1516
1517         use(node->child1());
1518         use(node->child2());
1519         m_indexInBlock = branchIndexInBlock;
1520         m_currentNode = branchNode;
1521         return true;
1522     }
1523     return false;
1524 }
1525
1526 void SpeculativeJIT::noticeOSRBirth(Node* node)
1527 {
1528     if (!node->hasVirtualRegister())
1529         return;
1530     
1531     VirtualRegister virtualRegister = node->virtualRegister();
1532     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1533     
1534     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1535 }
1536
1537 void SpeculativeJIT::compileMovHint(Node* node)
1538 {
1539     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1540     
1541     Node* child = node->child1().node();
1542     noticeOSRBirth(child);
1543     
1544     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1545 }
1546
1547 void SpeculativeJIT::bail(AbortReason reason)
1548 {
1549     if (verboseCompilationEnabled())
1550         dataLog("Bailing compilation.\n");
1551     m_compileOkay = true;
1552     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1553     clearGenerationInfo();
1554 }
1555
1556 void SpeculativeJIT::compileCurrentBlock()
1557 {
1558     ASSERT(m_compileOkay);
1559     
1560     if (!m_block)
1561         return;
1562     
1563     ASSERT(m_block->isReachable);
1564     
1565     m_jit.blockHeads()[m_block->index] = m_jit.label();
1566
1567     if (!m_block->intersectionOfCFAHasVisited) {
1568         // Don't generate code for basic blocks that are unreachable according to CFA.
1569         // But to be sure that nobody has generated a jump to this block, drop in a
1570         // breakpoint here.
1571         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1572         return;
1573     }
1574
1575     m_stream->appendAndLog(VariableEvent::reset());
1576     
1577     m_jit.jitAssertHasValidCallFrame();
1578     m_jit.jitAssertTagsInPlace();
1579     m_jit.jitAssertArgumentCountSane();
1580
1581     m_state.reset();
1582     m_state.beginBasicBlock(m_block);
1583     
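         // Record, for OSR exit, where each variable that is live at the head of this
         // block lives and in what format, by appending SetLocal events to the
         // variable event stream.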
1584     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1585         int operand = m_block->variablesAtHead.operandForIndex(i);
1586         Node* node = m_block->variablesAtHead[i];
1587         if (!node)
1588             continue; // No need to record dead SetLocals.
1589         
1590         VariableAccessData* variable = node->variableAccessData();
1591         DataFormat format;
1592         if (!node->refCount())
1593             continue; // No need to record dead SetLocals.
1594         format = dataFormatFor(variable->flushFormat());
1595         m_stream->appendAndLog(
1596             VariableEvent::setLocal(
1597                 VirtualRegister(operand),
1598                 variable->machineLocal(),
1599                 format));
1600     }
1601
1602     m_origin = NodeOrigin();
1603     
1604     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1605         m_currentNode = m_block->at(m_indexInBlock);
1606         
1607         // We may have hit a contradiction that the CFA was aware of but that the JIT
1608         // didn't cause directly.
1609         if (!m_state.isValid()) {
1610             bail(DFGBailedAtTopOfBlock);
1611             return;
1612         }
1613
1614         m_interpreter.startExecuting();
1615         m_jit.setForNode(m_currentNode);
1616         m_origin = m_currentNode->origin;
1617         if (validationEnabled())
1618             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1619         m_lastGeneratedNode = m_currentNode->op();
1620         
1621         ASSERT(m_currentNode->shouldGenerate());
1622         
1623         if (verboseCompilationEnabled()) {
1624             dataLogF(
1625                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1626                 (int)m_currentNode->index(),
1627                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1628             dataLog("\n");
1629         }
1630
1631         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1632             m_jit.jitReleaseAssertNoException();
1633
1634         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1635
1636         compile(m_currentNode);
1637         
1638         if (belongsInMinifiedGraph(m_currentNode->op()))
1639             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1640         
1641 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1642         m_jit.clearRegisterAllocationOffsets();
1643 #endif
1644         
1645         if (!m_compileOkay) {
1646             bail(DFGBailedAtEndOfNode);
1647             return;
1648         }
1649         
1650         // Make sure that the abstract state is rematerialized for the next node.
1651         m_interpreter.executeEffects(m_indexInBlock);
1652     }
1653     
1654     // Perform the most basic verification that children have been used correctly.
1655     if (!ASSERT_DISABLED) {
1656         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1657             GenerationInfo& info = m_generationInfo[index];
1658             RELEASE_ASSERT(!info.alive());
1659         }
1660     }
1661 }
1662
1663 // If we are making type predictions about our arguments then
1664 // we need to check that they are correct on function entry.
1665 void SpeculativeJIT::checkArgumentTypes()
1666 {
1667     ASSERT(!m_currentNode);
1668     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1669
1670     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1671         Node* node = m_jit.graph().m_arguments[i];
1672         if (!node) {
1673             // The argument is dead. We don't do any checks for such arguments.
1674             continue;
1675         }
1676         
1677         ASSERT(node->op() == SetArgument);
1678         ASSERT(node->shouldGenerate());
1679
1680         VariableAccessData* variableAccessData = node->variableAccessData();
1681         FlushFormat format = variableAccessData->flushFormat();
1682         
1683         if (format == FlushedJSValue)
1684             continue;
1685         
1686         VirtualRegister virtualRegister = variableAccessData->local();
1687
1688         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1689         
1690 #if USE(JSVALUE64)
1691         switch (format) {
1692         case FlushedInt32: {
1693             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1694             break;
1695         }
1696         case FlushedBoolean: {
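                 // In the JSVALUE64 encoding, false is ValueFalse and true is ValueFalse | 1,
                 // so XORing with ValueFalse maps a boolean to 0 or 1; if any bit other than
                 // the low bit survives, the value was not a boolean and we OSR exit.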
1697             GPRTemporary temp(this);
1698             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1699             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1700             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1701             break;
1702         }
1703         case FlushedCell: {
1704             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1705             break;
1706         }
1707         default:
1708             RELEASE_ASSERT_NOT_REACHED();
1709             break;
1710         }
1711 #else
1712         switch (format) {
1713         case FlushedInt32: {
1714             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1715             break;
1716         }
1717         case FlushedBoolean: {
1718             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1719             break;
1720         }
1721         case FlushedCell: {
1722             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1723             break;
1724         }
1725         default:
1726             RELEASE_ASSERT_NOT_REACHED();
1727             break;
1728         }
1729 #endif
1730     }
1731
1732     m_origin = NodeOrigin();
1733 }
1734
1735 bool SpeculativeJIT::compile()
1736 {
1737     checkArgumentTypes();
1738     
1739     ASSERT(!m_currentNode);
1740     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1741         m_jit.setForBlockIndex(blockIndex);
1742         m_block = m_jit.graph().block(blockIndex);
1743         compileCurrentBlock();
1744     }
1745     linkBranches();
1746     return true;
1747 }
1748
1749 void SpeculativeJIT::createOSREntries()
1750 {
1751     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1752         BasicBlock* block = m_jit.graph().block(blockIndex);
1753         if (!block)
1754             continue;
1755         if (!block->isOSRTarget)
1756             continue;
1757         
1758         // Currently we don't have OSR entry trampolines. We could add them
1759         // here if need be.
1760         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1761     }
1762 }
1763
1764 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1765 {
1766     unsigned osrEntryIndex = 0;
1767     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1768         BasicBlock* block = m_jit.graph().block(blockIndex);
1769         if (!block)
1770             continue;
1771         if (!block->isOSRTarget)
1772             continue;
1773         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1774     }
1775     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1776     
1777     if (verboseCompilationEnabled()) {
1778         DumpContext dumpContext;
1779         dataLog("OSR Entries:\n");
1780         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1781             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1782         if (!dumpContext.isEmpty())
1783             dumpContext.dump(WTF::dataFile());
1784     }
1785 }
1786
1787 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1788 {
1789     Edge child3 = m_jit.graph().varArgChild(node, 2);
1790     Edge child4 = m_jit.graph().varArgChild(node, 3);
1791
1792     ArrayMode arrayMode = node->arrayMode();
1793     
1794     GPRReg baseReg = base.gpr();
1795     GPRReg propertyReg = property.gpr();
1796     
1797     SpeculateDoubleOperand value(this, child3);
1798
1799     FPRReg valueReg = value.fpr();
1800     
1801     DFG_TYPE_CHECK(
1802         JSValueRegs(), child3, SpecFullRealNumber,
1803         m_jit.branchDouble(
1804             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1805     
1806     if (!m_compileOkay)
1807         return;
1808     
1809     StorageOperand storage(this, child4);
1810     GPRReg storageReg = storage.gpr();
1811
1812     if (node->op() == PutByValAlias) {
1813         // Store the value to the array.
1814         GPRReg propertyReg = property.gpr();
1815         FPRReg valueReg = value.fpr();
1816         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1817         
1818         noResult(m_currentNode);
1819         return;
1820     }
1821     
1822     GPRTemporary temporary;
1823     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1824
1825     MacroAssembler::Jump slowCase;
1826     
1827     if (arrayMode.isInBounds()) {
1828         speculationCheck(
1829             OutOfBounds, JSValueRegs(), 0,
1830             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1831     } else {
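             // The index is beyond the public length. If it is still within the vector,
             // bump the public length and store (a contiguous append); past the vector
             // length, either OSR exit (when out-of-bounds stores were not speculated)
             // or fall through to the out-of-bounds slow path added below.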
1832         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1833         
1834         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1835         
1836         if (!arrayMode.isOutOfBounds())
1837             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1838         
1839         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1840         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1841         
1842         inBounds.link(&m_jit);
1843     }
1844     
1845     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1846
1847     base.use();
1848     property.use();
1849     value.use();
1850     storage.use();
1851     
1852     if (arrayMode.isOutOfBounds()) {
1853         addSlowPathGenerator(
1854             slowPathCall(
1855                 slowCase, this,
1856                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1857                 NoResult, baseReg, propertyReg, valueReg));
1858     }
1859
1860     noResult(m_currentNode, UseChildrenCalledExplicitly);
1861 }
1862
1863 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1864 {
1865     SpeculateCellOperand string(this, node->child1());
1866     SpeculateStrictInt32Operand index(this, node->child2());
1867     StorageOperand storage(this, node->child3());
1868
1869     GPRReg stringReg = string.gpr();
1870     GPRReg indexReg = index.gpr();
1871     GPRReg storageReg = storage.gpr();
1872     
1873     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1874
1875     // unsigned comparison so we can filter out negative indices and indices that are too large
1876     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1877
1878     GPRTemporary scratch(this);
1879     GPRReg scratchReg = scratch.gpr();
1880
1881     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1882
1883     // Load the character into scratchReg
1884     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1885
1886     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1887     JITCompiler::Jump cont8Bit = m_jit.jump();
1888
1889     is16Bit.link(&m_jit);
1890
1891     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1892
1893     cont8Bit.link(&m_jit);
1894
1895     int32Result(scratchReg, m_currentNode);
1896 }
1897
1898 void SpeculativeJIT::compileGetByValOnString(Node* node)
1899 {
1900     SpeculateCellOperand base(this, node->child1());
1901     SpeculateStrictInt32Operand property(this, node->child2());
1902     StorageOperand storage(this, node->child3());
1903     GPRReg baseReg = base.gpr();
1904     GPRReg propertyReg = property.gpr();
1905     GPRReg storageReg = storage.gpr();
1906
1907     GPRTemporary scratch(this);
1908     GPRReg scratchReg = scratch.gpr();
1909 #if USE(JSVALUE32_64)
1910     GPRTemporary resultTag;
1911     GPRReg resultTagReg = InvalidGPRReg;
1912     if (node->arrayMode().isOutOfBounds()) {
1913         GPRTemporary realResultTag(this);
1914         resultTag.adopt(realResultTag);
1915         resultTagReg = resultTag.gpr();
1916     }
1917 #endif
1918
1919     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1920
1921     // unsigned comparison so we can filter out negative indices and indices that are too large
1922     JITCompiler::Jump outOfBounds = m_jit.branch32(
1923         MacroAssembler::AboveOrEqual, propertyReg,
1924         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1925     if (node->arrayMode().isInBounds())
1926         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1927
1928     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1929
1930     // Load the character into scratchReg
1931     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1932
1933     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1934     JITCompiler::Jump cont8Bit = m_jit.jump();
1935
1936     is16Bit.link(&m_jit);
1937
1938     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1939
1940     JITCompiler::Jump bigCharacter =
1941         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1942
1943     // 8 bit string values don't need the isASCII check.
1944     cont8Bit.link(&m_jit);
1945
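         // Index into the VM's single-character string table: scale the character code
         // by the pointer size (shift by 2 on 32-bit, by 3 on 64-bit) and load the
         // corresponding JSString*.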
1946     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1947     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1948     m_jit.loadPtr(scratchReg, scratchReg);
1949
1950     addSlowPathGenerator(
1951         slowPathCall(
1952             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1953
1954     if (node->arrayMode().isOutOfBounds()) {
1955 #if USE(JSVALUE32_64)
1956         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1957 #endif
1958
1959         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1960         if (globalObject->stringPrototypeChainIsSane()) {
1961             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1962             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1963             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1964             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1965             // indexed properties either.
1966             // https://bugs.webkit.org/show_bug.cgi?id=144668
1967             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1968             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1969             
1970 #if USE(JSVALUE64)
1971             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1972                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1973 #else
1974             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1975                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1976                 baseReg, propertyReg));
1977 #endif
1978         } else {
1979 #if USE(JSVALUE64)
1980             addSlowPathGenerator(
1981                 slowPathCall(
1982                     outOfBounds, this, operationGetByValStringInt,
1983                     scratchReg, baseReg, propertyReg));
1984 #else
1985             addSlowPathGenerator(
1986                 slowPathCall(
1987                     outOfBounds, this, operationGetByValStringInt,
1988                     resultTagReg, scratchReg, baseReg, propertyReg));
1989 #endif
1990         }
1991         
1992 #if USE(JSVALUE64)
1993         jsValueResult(scratchReg, m_currentNode);
1994 #else
1995         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1996 #endif
1997     } else
1998         cellResult(scratchReg, m_currentNode);
1999 }
2000
2001 void SpeculativeJIT::compileFromCharCode(Node* node)
2002 {
2003     Edge& child = node->child1();
2004     if (child.useKind() == UntypedUse) {
2005         JSValueOperand opr(this, child);
2006         JSValueRegs oprRegs = opr.jsValueRegs();
2007 #if USE(JSVALUE64)
2008         GPRTemporary result(this);
2009         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2010 #else
2011         GPRTemporary resultTag(this);
2012         GPRTemporary resultPayload(this);
2013         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2014 #endif
2015         flushRegisters();
2016         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2017         m_jit.exceptionCheck();
2018         
2019         jsValueResult(resultRegs, node);
2020         return;
2021     }
2022
2023     SpeculateStrictInt32Operand property(this, child);
2024     GPRReg propertyReg = property.gpr();
2025     GPRTemporary smallStrings(this);
2026     GPRTemporary scratch(this);
2027     GPRReg scratchReg = scratch.gpr();
2028     GPRReg smallStringsReg = smallStrings.gpr();
2029
2030     JITCompiler::JumpList slowCases;
2031     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2032     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2033     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2034
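         // Single-character strings are created lazily, so a null table entry (like any
         // character code at or above 0xff) takes the slow path call to
         // operationStringFromCharCode.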
2035     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2036     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2037     cellResult(scratchReg, m_currentNode);
2038 }
2039
2040 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2041 {
2042     VirtualRegister virtualRegister = node->virtualRegister();
2043     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2044
2045     switch (info.registerFormat()) {
2046     case DataFormatStorage:
2047         RELEASE_ASSERT_NOT_REACHED();
2048
2049     case DataFormatBoolean:
2050     case DataFormatCell:
2051         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2052         return GeneratedOperandTypeUnknown;
2053
2054     case DataFormatNone:
2055     case DataFormatJSCell:
2056     case DataFormatJS:
2057     case DataFormatJSBoolean:
2058     case DataFormatJSDouble:
2059         return GeneratedOperandJSValue;
2060
2061     case DataFormatJSInt32:
2062     case DataFormatInt32:
2063         return GeneratedOperandInteger;
2064
2065     default:
2066         RELEASE_ASSERT_NOT_REACHED();
2067         return GeneratedOperandTypeUnknown;
2068     }
2069 }
2070
2071 void SpeculativeJIT::compileValueToInt32(Node* node)
2072 {
2073     switch (node->child1().useKind()) {
2074 #if USE(JSVALUE64)
2075     case Int52RepUse: {
2076         SpeculateStrictInt52Operand op1(this, node->child1());
2077         GPRTemporary result(this, Reuse, op1);
2078         GPRReg op1GPR = op1.gpr();
2079         GPRReg resultGPR = result.gpr();
2080         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2081         int32Result(resultGPR, node, DataFormatInt32);
2082         return;
2083     }
2084 #endif // USE(JSVALUE64)
2085         
2086     case DoubleRepUse: {
2087         GPRTemporary result(this);
2088         SpeculateDoubleOperand op1(this, node->child1());
2089         FPRReg fpr = op1.fpr();
2090         GPRReg gpr = result.gpr();
2091         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2092         
2093         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2094         
2095         int32Result(gpr, node);
2096         return;
2097     }
2098     
2099     case NumberUse:
2100     case NotCellUse: {
2101         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2102         case GeneratedOperandInteger: {
2103             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2104             GPRTemporary result(this, Reuse, op1);
2105             m_jit.move(op1.gpr(), result.gpr());
2106             int32Result(result.gpr(), node, op1.format());
2107             return;
2108         }
2109         case GeneratedOperandJSValue: {
2110             GPRTemporary result(this);
2111 #if USE(JSVALUE64)
2112             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2113
2114             GPRReg gpr = op1.gpr();
2115             GPRReg resultGpr = result.gpr();
2116             FPRTemporary tempFpr(this);
2117             FPRReg fpr = tempFpr.fpr();
2118
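                 // JSVALUE64 encoding: boxed int32s have all TagTypeNumber bits set, so an
                 // unsigned >= comparison against the tag register identifies them; boxed
                 // doubles have some, but not all, of those bits set.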
2119             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2120             JITCompiler::JumpList converted;
2121
2122             if (node->child1().useKind() == NumberUse) {
2123                 DFG_TYPE_CHECK(
2124                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2125                     m_jit.branchTest64(
2126                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2127             } else {
2128                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2129                 
2130                 DFG_TYPE_CHECK(
2131                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2132                 
2133                 // It's not a cell: so true turns into 1 and all else turns into 0.
2134                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2135                 converted.append(m_jit.jump());
2136                 
2137                 isNumber.link(&m_jit);
2138             }
2139
2140             // If we get here, we have a double encoded as a JSValue.
2141             unboxDouble(gpr, resultGpr, fpr);
2142
2143             silentSpillAllRegisters(resultGpr);
2144             callOperation(toInt32, resultGpr, fpr);
2145             silentFillAllRegisters(resultGpr);
2146
2147             converted.append(m_jit.jump());
2148
2149             isInteger.link(&m_jit);
2150             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2151
2152             converted.link(&m_jit);
2153 #else
2154             Node* childNode = node->child1().node();
2155             VirtualRegister virtualRegister = childNode->virtualRegister();
2156             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2157
2158             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2159
2160             GPRReg payloadGPR = op1.payloadGPR();
2161             GPRReg resultGpr = result.gpr();
2162         
2163             JITCompiler::JumpList converted;
2164
2165             if (info.registerFormat() == DataFormatJSInt32)
2166                 m_jit.move(payloadGPR, resultGpr);
2167             else {
2168                 GPRReg tagGPR = op1.tagGPR();
2169                 FPRTemporary tempFpr(this);
2170                 FPRReg fpr = tempFpr.fpr();
2171                 FPRTemporary scratch(this);
2172
2173                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2174
2175                 if (node->child1().useKind() == NumberUse) {
2176                     DFG_TYPE_CHECK(
2177                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2178                         m_jit.branch32(
2179                             MacroAssembler::AboveOrEqual, tagGPR,
2180                             TrustedImm32(JSValue::LowestTag)));
2181                 } else {
2182                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2183                     
2184                     DFG_TYPE_CHECK(
2185                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2186                         m_jit.branchIfCell(op1.jsValueRegs()));
2187                     
2188                     // It's not a cell: so true turns into 1 and all else turns into 0.
2189                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2190                     m_jit.move(TrustedImm32(0), resultGpr);
2191                     converted.append(m_jit.jump());
2192                     
2193                     isBoolean.link(&m_jit);
2194                     m_jit.move(payloadGPR, resultGpr);
2195                     converted.append(m_jit.jump());
2196                     
2197                     isNumber.link(&m_jit);
2198                 }
2199
2200                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2201
2202                 silentSpillAllRegisters(resultGpr);
2203                 callOperation(toInt32, resultGpr, fpr);
2204                 silentFillAllRegisters(resultGpr);
2205
2206                 converted.append(m_jit.jump());
2207
2208                 isInteger.link(&m_jit);
2209                 m_jit.move(payloadGPR, resultGpr);
2210
2211                 converted.link(&m_jit);
2212             }
2213 #endif
2214             int32Result(resultGpr, node);
2215             return;
2216         }
2217         case GeneratedOperandTypeUnknown:
2218             RELEASE_ASSERT(!m_compileOkay);
2219             return;
2220         }
2221         RELEASE_ASSERT_NOT_REACHED();
2222         return;
2223     }
2224     
2225     default:
2226         ASSERT(!m_compileOkay);
2227         return;
2228     }
2229 }
2230
2231 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2232 {
2233     if (doesOverflow(node->arithMode())) {
2234         if (enableInt52()) {
2235             SpeculateInt32Operand op1(this, node->child1());
2236             GPRTemporary result(this, Reuse, op1);
2237             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2238             strictInt52Result(result.gpr(), node);
2239             return;
2240         }
2241         SpeculateInt32Operand op1(this, node->child1());
2242         FPRTemporary result(this);
2243             
2244         GPRReg inputGPR = op1.gpr();
2245         FPRReg outputFPR = result.fpr();
2246             
2247         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2248             
2249         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2250         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
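             // e.g. the bit pattern 0xFFFFFFFF converts to -1.0 above; adding 2^32
             // produces the intended unsigned value 4294967295.0.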
2251         positive.link(&m_jit);
2252             
2253         doubleResult(outputFPR, node);
2254         return;
2255     }
2256     
2257     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2258
2259     SpeculateInt32Operand op1(this, node->child1());
2260     GPRTemporary result(this);
2261
2262     m_jit.move(op1.gpr(), result.gpr());
2263
2264     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2265
2266     int32Result(result.gpr(), node, op1.format());
2267 }
2268
2269 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2270 {
2271     SpeculateDoubleOperand op1(this, node->child1());
2272     FPRTemporary scratch(this);
2273     GPRTemporary result(this);
2274     
2275     FPRReg valueFPR = op1.fpr();
2276     FPRReg scratchFPR = scratch.fpr();
2277     GPRReg resultGPR = result.gpr();
2278
2279     JITCompiler::JumpList failureCases;
2280     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2281     m_jit.branchConvertDoubleToInt32(
2282         valueFPR, resultGPR, failureCases, scratchFPR,
2283         shouldCheckNegativeZero(node->arithMode()));
2284     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2285
2286     int32Result(resultGPR, node);
2287 }
2288
2289 void SpeculativeJIT::compileDoubleRep(Node* node)
2290 {
2291     switch (node->child1().useKind()) {
2292     case RealNumberUse: {
2293         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2294         FPRTemporary result(this);
2295         
2296         JSValueRegs op1Regs = op1.jsValueRegs();
2297         FPRReg resultFPR = result.fpr();
2298         
2299 #if USE(JSVALUE64)
2300         GPRTemporary temp(this);
2301         GPRReg tempGPR = temp.gpr();
2302         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2303 #else
2304         FPRTemporary temp(this);
2305         FPRReg tempFPR = temp.fpr();
2306         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2307 #endif
2308         
2309         JITCompiler::Jump done = m_jit.branchDouble(
2310             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2311         
2312         DFG_TYPE_CHECK(
2313             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2314         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2315         
2316         done.link(&m_jit);
2317         
2318         doubleResult(resultFPR, node);
2319         return;
2320     }
2321     
2322     case NotCellUse:
2323     case NumberUse: {
2324         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2325
2326         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2327         if (isInt32Speculation(possibleTypes)) {
2328             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2329             FPRTemporary result(this);
2330             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2331             doubleResult(result.fpr(), node);
2332             return;
2333         }
2334
2335         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2336         FPRTemporary result(this);
2337
2338 #if USE(JSVALUE64)
2339         GPRTemporary temp(this);
2340
2341         GPRReg op1GPR = op1.gpr();
2342         GPRReg tempGPR = temp.gpr();
2343         FPRReg resultFPR = result.fpr();
2344         JITCompiler::JumpList done;
2345
2346         JITCompiler::Jump isInteger = m_jit.branch64(
2347             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2348
2349         if (node->child1().useKind() == NotCellUse) {
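                 // ToNumber for the remaining non-cell cases: null and false become 0,
                 // true becomes 1, and undefined becomes NaN.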
2350             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2351             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2352
2353             static const double zero = 0;
2354             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2355
2356             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2357             done.append(isNull);
2358
2359             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2360                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2361
2362             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2363             static const double one = 1;
2364             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2365             done.append(m_jit.jump());
2366             done.append(isFalse);
2367
2368             isUndefined.link(&m_jit);
2369             static const double NaN = PNaN;
2370             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2371             done.append(m_jit.jump());
2372
2373             isNumber.link(&m_jit);
2374         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2375             typeCheck(
2376                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2377                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2378         }
2379
2380         unboxDouble(op1GPR, tempGPR, resultFPR);
2381         done.append(m_jit.jump());
2382     
2383         isInteger.link(&m_jit);
2384         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2385         done.link(&m_jit);
2386 #else // USE(JSVALUE64) -> this is the 32_64 case
2387         FPRTemporary temp(this);
2388     
2389         GPRReg op1TagGPR = op1.tagGPR();
2390         GPRReg op1PayloadGPR = op1.payloadGPR();
2391         FPRReg tempFPR = temp.fpr();
2392         FPRReg resultFPR = result.fpr();
2393         JITCompiler::JumpList done;
2394     
2395         JITCompiler::Jump isInteger = m_jit.branch32(
2396             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2397
2398         if (node->child1().useKind() == NotCellUse) {
2399             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2400             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2401
2402             static const double zero = 0;
2403             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2404
2405             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2406             done.append(isNull);
2407
2408             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2409
2410             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2411             static const double one = 1;
2412             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2413             done.append(m_jit.jump());
2414             done.append(isFalse);
2415
2416             isUndefined.link(&m_jit);
2417             static const double NaN = PNaN;
2418             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2419             done.append(m_jit.jump());
2420
2421             isNumber.link(&m_jit);
2422         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2423             typeCheck(
2424                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2425                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2426         }
2427
2428         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2429         done.append(m_jit.jump());
2430     
2431         isInteger.link(&m_jit);
2432         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2433         done.link(&m_jit);
2434 #endif // USE(JSVALUE64)
2435     
2436         doubleResult(resultFPR, node);
2437         return;
2438     }
2439         
2440 #if USE(JSVALUE64)
2441     case Int52RepUse: {
2442         SpeculateStrictInt52Operand value(this, node->child1());
2443         FPRTemporary result(this);
2444         
2445         GPRReg valueGPR = value.gpr();
2446         FPRReg resultFPR = result.fpr();
2447
2448         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2449         
2450         doubleResult(resultFPR, node);
2451         return;
2452     }
2453 #endif // USE(JSVALUE64)
2454         
2455     default:
2456         RELEASE_ASSERT_NOT_REACHED();
2457         return;
2458     }
2459 }
2460
2461 void SpeculativeJIT::compileValueRep(Node* node)
2462 {
2463     switch (node->child1().useKind()) {
2464     case DoubleRepUse: {
2465         SpeculateDoubleOperand value(this, node->child1());
2466         JSValueRegsTemporary result(this);
2467         
2468         FPRReg valueFPR = value.fpr();
2469         JSValueRegs resultRegs = result.regs();
2470         
2471         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2472         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2473         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2474         // local was purified.
2475         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2476             m_jit.purifyNaN(valueFPR);
2477
2478         boxDouble(valueFPR, resultRegs);
2479         
2480         jsValueResult(resultRegs, node);
2481         return;
2482     }
2483         
2484 #if USE(JSVALUE64)
2485     case Int52RepUse: {
2486         SpeculateStrictInt52Operand value(this, node->child1());
2487         GPRTemporary result(this);
2488         
2489         GPRReg valueGPR = value.gpr();
2490         GPRReg resultGPR = result.gpr();
2491         
2492         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2493         
2494         jsValueResult(resultGPR, node);
2495         return;
2496     }
2497 #endif // USE(JSVALUE64)
2498         
2499     default:
2500         RELEASE_ASSERT_NOT_REACHED();
2501         return;
2502     }
2503 }
2504
2505 static double clampDoubleToByte(double d)
2506 {
2507     d += 0.5;
2508     if (!(d > 0))
2509         d = 0;
2510     else if (d > 255)
2511         d = 255;
2512     return d;
2513 }
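     // Worked examples for the clamp above (illustrative): clampDoubleToByte(NaN) and
     // clampDoubleToByte(-3.0) both yield 0, clampDoubleToByte(300.0) yields 255, and
     // clampDoubleToByte(10.4) yields 10.9, which callers then truncate to 10.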
2514
2515 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2516 {
2517     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2518     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2519     jit.xorPtr(result, result);
2520     MacroAssembler::Jump clamped = jit.jump();
2521     tooBig.link(&jit);
2522     jit.move(JITCompiler::TrustedImm32(255), result);
2523     clamped.link(&jit);
2524     inBounds.link(&jit);
2525 }
2526
2527 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2528 {
2529     // Unordered compare so we pick up NaN
2530     static const double zero = 0;
2531     static const double byteMax = 255;
2532     static const double half = 0.5;
2533     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2534     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2535     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2536     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2537     
2538     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2539     // FIXME: This should probably just use a floating point round!
2540     // https://bugs.webkit.org/show_bug.cgi?id=72054
2541     jit.addDouble(source, scratch);
2542     jit.truncateDoubleToInt32(scratch, result);   
2543     MacroAssembler::Jump truncatedInt = jit.jump();
2544     
2545     tooSmall.link(&jit);
2546     jit.xorPtr(result, result);
2547     MacroAssembler::Jump zeroed = jit.jump();
2548     
2549     tooBig.link(&jit);
2550     jit.move(JITCompiler::TrustedImm32(255), result);
2551     
2552     truncatedInt.link(&jit);
2553     zeroed.link(&jit);
2554
2555 }
2556
2557 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2558 {
2559     if (node->op() == PutByValAlias)
2560         return JITCompiler::Jump();
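         // If the abstract interpreter proved the base to be a specific typed array view
         // and the index is a constant below its length, the bounds check can be elided
         // entirely; otherwise compare against the folded constant length instead of
         // reloading it from the view.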
2561     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2562         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2563     if (view) {
2564         uint32_t length = view->length();
2565         Node* indexNode = m_jit.graph().child(node, 1).node();
2566         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2567             return JITCompiler::Jump();
2568         return m_jit.branch32(
2569             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2570     }
2571     return m_jit.branch32(
2572         MacroAssembler::AboveOrEqual, indexGPR,
2573         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2574 }
2575
2576 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2577 {
2578     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2579     if (!jump.isSet())
2580         return;
2581     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2582 }
2583
2584 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2585 {
2586     ASSERT(isInt(type));
2587     
2588     SpeculateCellOperand base(this, node->child1());
2589     SpeculateStrictInt32Operand property(this, node->child2());
2590     StorageOperand storage(this, node->child3());
2591
2592     GPRReg baseReg = base.gpr();
2593     GPRReg propertyReg = property.gpr();
2594     GPRReg storageReg = storage.gpr();
2595
2596     GPRTemporary result(this);
2597     GPRReg resultReg = result.gpr();
2598
2599     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2600
2601     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2602     switch (elementSize(type)) {
2603     case 1:
2604         if (isSigned(type))
2605             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2606         else
2607             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2608         break;
2609     case 2:
2610         if (isSigned(type))
2611             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2612         else
2613             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2614         break;
2615     case 4:
2616         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2617         break;
2618     default:
2619         CRASH();
2620     }
2621     if (elementSize(type) < 4 || isSigned(type)) {
2622         int32Result(resultReg, node);
2623         return;
2624     }
2625     
2626     ASSERT(elementSize(type) == 4 && !isSigned(type));
2627     if (node->shouldSpeculateInt32()) {
2628         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2629         int32Result(resultReg, node);
2630         return;
2631     }
2632     
2633 #if USE(JSVALUE64)
2634     if (node->shouldSpeculateMachineInt()) {
2635         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2636         strictInt52Result(resultReg, node);
2637         return;
2638     }
2639 #endif
2640     
2641     FPRTemporary fresult(this);
2642     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2643     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2644     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2645     positive.link(&m_jit);
2646     doubleResult(fresult.fpr(), node);
2647 }
2648
2649 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2650 {
2651     ASSERT(isInt(type));
2652     
2653     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2654     GPRReg storageReg = storage.gpr();
2655     
2656     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2657     
2658     GPRTemporary value;
2659     GPRReg valueGPR = InvalidGPRReg;
2660     
2661     if (valueUse->isConstant()) {
2662         JSValue jsValue = valueUse->asJSValue();
2663         if (!jsValue.isNumber()) {
2664             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2665             noResult(node);
2666             return;
2667         }
2668         double d = jsValue.asNumber();
2669         if (isClamped(type)) {
2670             ASSERT(elementSize(type) == 1);
2671             d = clampDoubleToByte(d);
2672         }
2673         GPRTemporary scratch(this);
2674         GPRReg scratchReg = scratch.gpr();
2675         m_jit.move(Imm32(toInt32(d)), scratchReg);
2676         value.adopt(scratch);
2677         valueGPR = scratchReg;
2678     } else {
2679         switch (valueUse.useKind()) {
2680         case Int32Use: {
2681             SpeculateInt32Operand valueOp(this, valueUse);
2682             GPRTemporary scratch(this);
2683             GPRReg scratchReg = scratch.gpr();
2684             m_jit.move(valueOp.gpr(), scratchReg);
2685             if (isClamped(type)) {
2686                 ASSERT(elementSize(type) == 1);
2687                 compileClampIntegerToByte(m_jit, scratchReg);
2688             }
2689             value.adopt(scratch);
2690             valueGPR = scratchReg;
2691             break;
2692         }
2693             
2694 #if USE(JSVALUE64)
2695         case Int52RepUse: {
2696             SpeculateStrictInt52Operand valueOp(this, valueUse);
2697             GPRTemporary scratch(this);
2698             GPRReg scratchReg = scratch.gpr();
2699             m_jit.move(valueOp.gpr(), scratchReg);
2700             if (isClamped(type)) {
2701                 ASSERT(elementSize(type) == 1);
2702                 MacroAssembler::Jump inBounds = m_jit.branch64(
2703                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2704                 MacroAssembler::Jump tooBig = m_jit.branch64(
2705                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2706                 m_jit.move(TrustedImm32(0), scratchReg);
2707                 MacroAssembler::Jump clamped = m_jit.jump();
2708                 tooBig.link(&m_jit);
2709                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2710                 clamped.link(&m_jit);
2711                 inBounds.link(&m_jit);
2712             }
2713             value.adopt(scratch);
2714             valueGPR = scratchReg;
2715             break;
2716         }
2717 #endif // USE(JSVALUE64)
2718             
2719         case DoubleRepUse: {
2720             if (isClamped(type)) {
2721                 ASSERT(elementSize(type) == 1);
2722                 SpeculateDoubleOperand valueOp(this, valueUse);
2723                 GPRTemporary result(this);
2724                 FPRTemporary floatScratch(this);
2725                 FPRReg fpr = valueOp.fpr();
2726                 GPRReg gpr = result.gpr();
2727                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2728                 value.adopt(result);
2729                 valueGPR = gpr;
2730             } else {
2731                 SpeculateDoubleOperand valueOp(this, valueUse);
2732                 GPRTemporary result(this);
2733                 FPRReg fpr = valueOp.fpr();
2734                 GPRReg gpr = result.gpr();
2735                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
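                     // NaN stores as 0, matching ToInt32 semantics for integer typed arrays.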
2736                 m_jit.xorPtr(gpr, gpr);
2737                 MacroAssembler::Jump fixed = m_jit.jump();
2738                 notNaN.link(&m_jit);
2739                 
2740                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2741                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2742                 
2743                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2744                 
2745                 fixed.link(&m_jit);
2746                 value.adopt(result);
2747                 valueGPR = gpr;
2748             }
2749             break;
2750         }
2751             
2752         default:
2753             RELEASE_ASSERT_NOT_REACHED();
2754             break;
2755         }
2756     }
2757     
2758     ASSERT_UNUSED(valueGPR, valueGPR != property);
2759     ASSERT(valueGPR != base);
2760     ASSERT(valueGPR != storageReg);
2761     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2762     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2763         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2764         outOfBounds = MacroAssembler::Jump();
2765     }
2766
2767     switch (elementSize(type)) {
2768     case 1:
2769         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2770         break;
2771     case 2:
2772         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2773         break;
2774     case 4:
2775         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2776         break;
2777     default:
2778         CRASH();
2779     }
2780     if (outOfBounds.isSet())
2781         outOfBounds.link(&m_jit);
2782     noResult(node);
2783 }
2784
2785 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2786 {
2787     ASSERT(isFloat(type));
2788     
2789     SpeculateCellOperand base(this, node->child1());
2790     SpeculateStrictInt32Operand property(this, node->child2());
2791     StorageOperand storage(this, node->child3());
2792
2793     GPRReg baseReg = base.gpr();
2794     GPRReg propertyReg = property.gpr();
2795     GPRReg storageReg = storage.gpr();
2796
2797     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2798
2799     FPRTemporary result(this);
2800     FPRReg resultReg = result.fpr();
2801     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2802     switch (elementSize(type)) {
2803     case 4:
2804         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2805         m_jit.convertFloatToDouble(resultReg, resultReg);
2806         break;
2807     case 8: {
2808         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2809         break;
2810     }
2811     default:
2812         RELEASE_ASSERT_NOT_REACHED();
2813     }
2814     
2815     doubleResult(resultReg, node);
2816 }
2817
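// Stores a double into a Float32/Float64 typed array. 32-bit elements are first narrowed to float
// in a scratch FPR. Depending on the array mode, an out-of-bounds index either triggers a
// speculation check or silently skips the store.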
2818 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2819 {
2820     ASSERT(isFloat(type));
2821     
2822     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2823     GPRReg storageReg = storage.gpr();
2824     
2825     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2826     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2827
2828     SpeculateDoubleOperand valueOp(this, valueUse);
2829     FPRTemporary scratch(this);
2830     FPRReg valueFPR = valueOp.fpr();
2831     FPRReg scratchFPR = scratch.fpr();
2832
2833     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2834     
2835     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2836     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2837         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2838         outOfBounds = MacroAssembler::Jump();
2839     }
2840     
2841     switch (elementSize(type)) {
2842     case 4: {
2843         m_jit.moveDouble(valueFPR, scratchFPR);
2844         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2845         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2846         break;
2847     }
2848     case 8:
2849         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2850         break;
2851     default:
2852         RELEASE_ASSERT_NOT_REACHED();
2853     }
2854     if (outOfBounds.isSet())
2855         outOfBounds.link(&m_jit);
2856     noResult(node);
2857 }
2858
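// Walks the value's prototype chain, comparing each prototype against prototypeReg. Proxy objects
// bail out to operationDefaultHasInstance. The JS boolean result is left in scratchReg.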
2859 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2860 {
2861     // Check that prototype is an object.
2862     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2863     
2864     // Initialize scratchReg with the value being checked.
2865     m_jit.move(valueReg, scratchReg);
2866     
2867     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2868     MacroAssembler::Label loop(&m_jit);
2869     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2870         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2871     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2872     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2873     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2874 #if USE(JSVALUE64)
2875     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2876 #else
2877     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2878 #endif
2879     
2880     // No match - result is false.
2881 #if USE(JSVALUE64)
2882     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2883 #else
2884     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2885 #endif
2886     MacroAssembler::JumpList doneJumps; 
2887     doneJumps.append(m_jit.jump());
2888
2889     performDefaultHasInstance.link(&m_jit);
2890     silentSpillAllRegisters(scratchReg);
2891     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2892     silentFillAllRegisters(scratchReg);
2893     m_jit.exceptionCheck();
2894 #if USE(JSVALUE64)
2895     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2896 #endif
2897     doneJumps.append(m_jit.jump());
2898     
2899     isInstance.link(&m_jit);
2900 #if USE(JSVALUE64)
2901     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2902 #else
2903     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2904 #endif
2905     
2906     doneJumps.link(&m_jit);
2907 }
2908
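// Speculates that the base cell's type info flags include the flag(s) requested by the node;
// exits if the masked bits are all zero.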
2909 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2910 {
2911     SpeculateCellOperand base(this, node->child1());
2912
2913     GPRReg baseGPR = base.gpr();
2914
2915     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2916
2917     noResult(node);
2918 }
2919
2920 void SpeculativeJIT::compileInstanceOf(Node* node)
2921 {
2922     if (node->child1().useKind() == UntypedUse) {
2923         // It might not be a cell. Speculate less aggressively.
2924         // Or: it might only be used once (i.e. by us), so we get zero benefit
2925         // from speculating any more aggressively than we absolutely need to.
2926         
2927         JSValueOperand value(this, node->child1());
2928         SpeculateCellOperand prototype(this, node->child2());
2929         GPRTemporary scratch(this);
2930         GPRTemporary scratch2(this);
2931         
2932         GPRReg prototypeReg = prototype.gpr();
2933         GPRReg scratchReg = scratch.gpr();
2934         GPRReg scratch2Reg = scratch2.gpr();
2935         
2936         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2937         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2938         moveFalseTo(scratchReg);
2939
2940         MacroAssembler::Jump done = m_jit.jump();
2941         
2942         isCell.link(&m_jit);
2943         
2944         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2945         
2946         done.link(&m_jit);
2947
2948         blessedBooleanResult(scratchReg, node);
2949         return;
2950     }
2951     
2952     SpeculateCellOperand value(this, node->child1());
2953     SpeculateCellOperand prototype(this, node->child2());
2954     
2955     GPRTemporary scratch(this);
2956     GPRTemporary scratch2(this);
2957     
2958     GPRReg valueReg = value.gpr();
2959     GPRReg prototypeReg = prototype.gpr();
2960     GPRReg scratchReg = scratch.gpr();
2961     GPRReg scratch2Reg = scratch2.gpr();
2962     
2963     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2964
2965     blessedBooleanResult(scratchReg, node);
2966 }
2967
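// Shared path for bitwise ops on untyped operands. Operands known not to be numbers go straight
// to the slow-path operation; otherwise the snippet generator emits an inline fast path and the
// operation is only reached from its slow-path jump list.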
2968 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2969 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2970 {
2971     Edge& leftChild = node->child1();
2972     Edge& rightChild = node->child2();
2973
2974     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2975         JSValueOperand left(this, leftChild);
2976         JSValueOperand right(this, rightChild);
2977         JSValueRegs leftRegs = left.jsValueRegs();
2978         JSValueRegs rightRegs = right.jsValueRegs();
2979 #if USE(JSVALUE64)
2980         GPRTemporary result(this);
2981         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2982 #else
2983         GPRTemporary resultTag(this);
2984         GPRTemporary resultPayload(this);
2985         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2986 #endif
2987         flushRegisters();
2988         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2989         m_jit.exceptionCheck();
2990
2991         jsValueResult(resultRegs, node);
2992         return;
2993     }
2994
2995     Optional<JSValueOperand> left;
2996     Optional<JSValueOperand> right;
2997
2998     JSValueRegs leftRegs;
2999     JSValueRegs rightRegs;
3000
3001 #if USE(JSVALUE64)
3002     GPRTemporary result(this);
3003     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3004     GPRTemporary scratch(this);
3005     GPRReg scratchGPR = scratch.gpr();
3006 #else
3007     GPRTemporary resultTag(this);
3008     GPRTemporary resultPayload(this);
3009     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3010     GPRReg scratchGPR = resultTag.gpr();
3011 #endif
3012
3013     SnippetOperand leftOperand;
3014     SnippetOperand rightOperand;
3015
3016     // The snippet generator does not support both operands being constant. If the left
3017     // operand is already const, we'll ignore the right operand's constness.
3018     if (leftChild->isInt32Constant())
3019         leftOperand.setConstInt32(leftChild->asInt32());
3020     else if (rightChild->isInt32Constant())
3021         rightOperand.setConstInt32(rightChild->asInt32());
3022
3023     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3024
3025     if (!leftOperand.isConst()) {
3026         left = JSValueOperand(this, leftChild);
3027         leftRegs = left->jsValueRegs();
3028     }
3029     if (!rightOperand.isConst()) {
3030         right = JSValueOperand(this, rightChild);
3031         rightRegs = right->jsValueRegs();
3032     }
3033
3034     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3035     gen.generateFastPath(m_jit);
3036
3037     ASSERT(gen.didEmitFastPath());
3038     gen.endJumpList().append(m_jit.jump());
3039
3040     gen.slowPathJumpList().link(&m_jit);
3041     silentSpillAllRegisters(resultRegs);
3042
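    // A constant operand was never loaded into a register for the fast path, so materialize it
    // into the result registers (which are free here) before making the slow-path call.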
3043     if (leftOperand.isConst()) {
3044         leftRegs = resultRegs;
3045         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3046     } else if (rightOperand.isConst()) {
3047         rightRegs = resultRegs;
3048         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3049     }
3050
3051     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3052
3053     silentFillAllRegisters(resultRegs);
3054     m_jit.exceptionCheck();
3055
3056     gen.endJumpList().link(&m_jit);
3057     jsValueResult(resultRegs, node);
3058 }
3059
3060 void SpeculativeJIT::compileBitwiseOp(Node* node)
3061 {
3062     NodeType op = node->op();
3063     Edge& leftChild = node->child1();
3064     Edge& rightChild = node->child2();
3065
3066     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3067         switch (op) {
3068         case BitAnd:
3069             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3070             return;
3071         case BitOr:
3072             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3073             return;
3074         case BitXor:
3075             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3076             return;
3077         default:
3078             RELEASE_ASSERT_NOT_REACHED();
3079         }
3080     }
3081
3082     if (leftChild->isInt32Constant()) {
3083         SpeculateInt32Operand op2(this, rightChild);
3084         GPRTemporary result(this, Reuse, op2);
3085
3086         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3087
3088         int32Result(result.gpr(), node);
3089
3090     } else if (rightChild->isInt32Constant()) {
3091         SpeculateInt32Operand op1(this, leftChild);
3092         GPRTemporary result(this, Reuse, op1);
3093
3094         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3095
3096         int32Result(result.gpr(), node);
3097
3098     } else {
3099         SpeculateInt32Operand op1(this, leftChild);
3100         SpeculateInt32Operand op2(this, rightChild);
3101         GPRTemporary result(this, Reuse, op1, op2);
3102         
3103         GPRReg reg1 = op1.gpr();
3104         GPRReg reg2 = op2.gpr();
3105         bitOp(op, reg1, reg2, result.gpr());
3106         
3107         int32Result(result.gpr(), node);
3108     }
3109 }
3110
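// Like emitUntypedBitOp, but for BitRShift/BitURShift: picks the signed or unsigned shift variant
// and the matching slow-path operation, then drives JITRightShiftGenerator.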
3111 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3112 {
3113     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3114         ? operationValueBitRShift : operationValueBitURShift;
3115     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3116         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3117
3118     Edge& leftChild = node->child1();
3119     Edge& rightChild = node->child2();
3120
3121     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3122         JSValueOperand left(this, leftChild);
3123         JSValueOperand right(this, rightChild);
3124         JSValueRegs leftRegs = left.jsValueRegs();
3125         JSValueRegs rightRegs = right.jsValueRegs();
3126 #if USE(JSVALUE64)
3127         GPRTemporary result(this);
3128         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3129 #else
3130         GPRTemporary resultTag(this);
3131         GPRTemporary resultPayload(this);
3132         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3133 #endif
3134         flushRegisters();
3135         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3136         m_jit.exceptionCheck();
3137
3138         jsValueResult(resultRegs, node);
3139         return;
3140     }
3141
3142     Optional<JSValueOperand> left;
3143     Optional<JSValueOperand> right;
3144
3145     JSValueRegs leftRegs;
3146     JSValueRegs rightRegs;
3147
3148     FPRTemporary leftNumber(this);
3149     FPRReg leftFPR = leftNumber.fpr();
3150
3151 #if USE(JSVALUE64)
3152     GPRTemporary result(this);
3153     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3154     GPRTemporary scratch(this);
3155     GPRReg scratchGPR = scratch.gpr();
3156     FPRReg scratchFPR = InvalidFPRReg;
3157 #else
3158     GPRTemporary resultTag(this);
3159     GPRTemporary resultPayload(this);
3160     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3161     GPRReg scratchGPR = resultTag.gpr();
3162     FPRTemporary fprScratch(this);
3163     FPRReg scratchFPR = fprScratch.fpr();
3164 #endif
3165
3166     SnippetOperand leftOperand;
3167     SnippetOperand rightOperand;
3168
3169     // The snippet generator does not support both operands being constant. If the left
3170     // operand is already const, we'll ignore the right operand's constness.
3171     if (leftChild->isInt32Constant())
3172         leftOperand.setConstInt32(leftChild->asInt32());
3173     else if (rightChild->isInt32Constant())
3174         rightOperand.setConstInt32(rightChild->asInt32());
3175
3176     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3177
3178     if (!leftOperand.isConst()) {
3179         left = JSValueOperand(this, leftChild);
3180         leftRegs = left->jsValueRegs();
3181     }
3182     if (!rightOperand.isConst()) {
3183         right = JSValueOperand(this, rightChild);
3184         rightRegs = right->jsValueRegs();
3185     }
3186
3187     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3188         leftFPR, scratchGPR, scratchFPR, shiftType);
3189     gen.generateFastPath(m_jit);
3190
3191     ASSERT(gen.didEmitFastPath());
3192     gen.endJumpList().append(m_jit.jump());
3193
3194     gen.slowPathJumpList().link(&m_jit);
3195     silentSpillAllRegisters(resultRegs);
3196
3197     if (leftOperand.isConst()) {
3198         leftRegs = resultRegs;
3199         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3200     } else if (rightOperand.isConst()) {
3201         rightRegs = resultRegs;
3202         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3203     }
3204
3205     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3206
3207     silentFillAllRegisters(resultRegs);
3208     m_jit.exceptionCheck();
3209
3210     gen.endJumpList().link(&m_jit);
3211     jsValueResult(resultRegs, node);
3212     return;
3213 }
3214
3215 void SpeculativeJIT::compileShiftOp(Node* node)
3216 {
3217     NodeType op = node->op();
3218     Edge& leftChild = node->child1();
3219     Edge& rightChild = node->child2();
3220
3221     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3222         switch (op) {
3223         case BitLShift:
3224             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3225             return;
3226         case BitRShift:
3227         case BitURShift:
3228             emitUntypedRightShiftBitOp(node);
3229             return;
3230         default:
3231             RELEASE_ASSERT_NOT_REACHED();
3232         }
3233     }
3234
3235     if (rightChild->isInt32Constant()) {
3236         SpeculateInt32Operand op1(this, leftChild);
3237         GPRTemporary result(this, Reuse, op1);
3238
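        // Shift amounts in JS are taken modulo 32, so only the low five bits of the constant matter.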
3239         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3240
3241         int32Result(result.gpr(), node);
3242     } else {
3243         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3244         SpeculateInt32Operand op1(this, leftChild);
3245         SpeculateInt32Operand op2(this, rightChild);
3246         GPRTemporary result(this, Reuse, op1);
3247
3248         GPRReg reg1 = op1.gpr();
3249         GPRReg reg2 = op2.gpr();
3250         shiftOp(op, reg1, reg2, result.gpr());
3251
3252         int32Result(result.gpr(), node);
3253     }
3254 }
3255
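// ValueAdd on untyped operands: if either side is known not to be a number, operationValueAddNotNumber
// is called directly; otherwise JITAddGenerator emits the inline fast path with operationValueAdd as
// the slow path.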
3256 void SpeculativeJIT::compileValueAdd(Node* node)
3257 {
3258     Edge& leftChild = node->child1();
3259     Edge& rightChild = node->child2();
3260
3261     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3262         JSValueOperand left(this, leftChild);
3263         JSValueOperand right(this, rightChild);
3264         JSValueRegs leftRegs = left.jsValueRegs();
3265         JSValueRegs rightRegs = right.jsValueRegs();
3266 #if USE(JSVALUE64)
3267         GPRTemporary result(this);
3268         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3269 #else
3270         GPRTemporary resultTag(this);
3271         GPRTemporary resultPayload(this);
3272         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3273 #endif
3274         flushRegisters();
3275         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3276         m_jit.exceptionCheck();
3277     
3278         jsValueResult(resultRegs, node);
3279         return;
3280     }
3281
3282     Optional<JSValueOperand> left;
3283     Optional<JSValueOperand> right;
3284
3285     JSValueRegs leftRegs;
3286     JSValueRegs rightRegs;
3287
3288     FPRTemporary leftNumber(this);
3289     FPRTemporary rightNumber(this);
3290     FPRReg leftFPR = leftNumber.fpr();
3291     FPRReg rightFPR = rightNumber.fpr();
3292
3293 #if USE(JSVALUE64)
3294     GPRTemporary result(this);
3295     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3296     GPRTemporary scratch(this);
3297     GPRReg scratchGPR = scratch.gpr();
3298     FPRReg scratchFPR = InvalidFPRReg;
3299 #else
3300     GPRTemporary resultTag(this);
3301     GPRTemporary resultPayload(this);
3302     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3303     GPRReg scratchGPR = resultTag.gpr();
3304     FPRTemporary fprScratch(this);
3305     FPRReg scratchFPR = fprScratch.fpr();
3306 #endif
3307
3308     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3309     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3310
3311     // The snippet generator does not support both operands being constant. If the left
3312     // operand is already const, we'll ignore the right operand's constness.
3313     if (leftChild->isInt32Constant())
3314         leftOperand.setConstInt32(leftChild->asInt32());
3315     else if (rightChild->isInt32Constant())
3316         rightOperand.setConstInt32(rightChild->asInt32());
3317
3318     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3319
3320     if (!leftOperand.isConst()) {
3321         left = JSValueOperand(this, leftChild);
3322         leftRegs = left->jsValueRegs();
3323     }
3324     if (!rightOperand.isConst()) {
3325         right = JSValueOperand(this, rightChild);
3326         rightRegs = right->jsValueRegs();
3327     }
3328
3329     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3330         leftFPR, rightFPR, scratchGPR, scratchFPR);
3331     gen.generateFastPath(m_jit);
3332
3333     ASSERT(gen.didEmitFastPath());
3334     gen.endJumpList().append(m_jit.jump());
3335
3336     gen.slowPathJumpList().link(&m_jit);
3337
3338     silentSpillAllRegisters(resultRegs);
3339
3340     if (leftOperand.isConst()) {
3341         leftRegs = resultRegs;
3342         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3343     } else if (rightOperand.isConst()) {
3344         rightRegs = resultRegs;
3345         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3346     }
3347
3348     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3349
3350     silentFillAllRegisters(resultRegs);
3351     m_jit.exceptionCheck();
3352
3353     gen.endJumpList().link(&m_jit);
3354     jsValueResult(resultRegs, node);
3355     return;
3356 }
3357
3358 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3359 {
3360     // We could do something smarter here, but this case is currently super rare and, unless
3361     // Symbol.hasInstance becomes popular, it will likely remain that way.
3362
3363     JSValueOperand value(this, node->child1());
3364     SpeculateCellOperand constructor(this, node->child2());
3365     JSValueOperand hasInstanceValue(this, node->child3());
3366     GPRTemporary result(this);
3367
3368     JSValueRegs valueRegs = value.jsValueRegs();
3369     GPRReg constructorGPR = constructor.gpr();
3370     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3371     GPRReg resultGPR = result.gpr();
3372
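    // Unconditionally take the slow path; per the comment above, an inline fast path is not worth
    // generating here.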
3373     MacroAssembler::Jump slowCase = m_jit.jump();
3374
3375     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3376
3377     unblessedBooleanResult(resultGPR, node);
3378 }
3379
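// True if the value is a cell whose type is ArrayType; non-cells and all other cells are false.
// No slow path is needed.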
3380 void SpeculativeJIT::compileIsJSArray(Node* node)
3381 {
3382     JSValueOperand value(this, node->child1());
3383     GPRFlushedCallResult result(this);
3384
3385     JSValueRegs valueRegs = value.jsValueRegs();
3386     GPRReg resultGPR = result.gpr();
3387
3388     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3389
3390     m_jit.compare8(JITCompiler::Equal,
3391         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3392         TrustedImm32(ArrayType),
3393         resultGPR);
3394     blessBoolean(resultGPR);
3395     JITCompiler::Jump done = m_jit.jump();
3396
3397     isNotCell.link(&m_jit);
3398     moveFalseTo(resultGPR);
3399
3400     done.link(&m_jit);
3401     blessedBooleanResult(resultGPR, node);
3402 }
3403
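// Cells of ArrayType are trivially true; any other cell is resolved by operationIsArrayObject;
// non-cells are false.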
3404 void SpeculativeJIT::compileIsArrayObject(Node* node)
3405 {
3406     JSValueOperand value(this, node->child1());
3407     GPRFlushedCallResult result(this);
3408
3409     JSValueRegs valueRegs = value.jsValueRegs();
3410     GPRReg resultGPR = result.gpr();
3411
3412     JITCompiler::JumpList done;
3413
3414     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3415
3416     JITCompiler::Jump notJSArray = m_jit.branch8(JITCompiler::NotEqual,
3417         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3418         TrustedImm32(ArrayType));
3419     m_jit.move(TrustedImm32(true), resultGPR);
3420     done.append(m_jit.jump());
3421
3422     notJSArray.link(&m_jit);
3423     silentSpillAllRegisters(resultGPR);
3424     callOperation(operationIsArrayObject, resultGPR, valueRegs);
3425     silentFillAllRegisters(resultGPR);
3426     m_jit.exceptionCheck();
3427     done.append(m_jit.jump());
3428
3429     isNotCell.link(&m_jit);
3430     m_jit.move(TrustedImm32(false), resultGPR);
3431
3432     done.link(&m_jit);
3433     unblessedBooleanResult(resultGPR, node);
3434 }
3435
3436 // FIXME: This function should just get the ClassInfo and check if it's == ArrayConstructor::info(). https://bugs.webkit.org/show_bug.cgi?id=155667
3437 void SpeculativeJIT::compileIsArrayConstructor(Node* node)
3438 {
3439     JSValueOperand value(this, node->child1());
3440     GPRFlushedCallResult result(this);
3441
3442     JSValueRegs valueRegs = value.jsValueRegs();
3443     GPRReg resultGPR = result.gpr();
3444
3445     flushRegisters();
3446     callOperation(operationIsArrayConstructor, resultGPR, valueRegs);
3447     unblessedBooleanResult(resultGPR, node);
3448 }
3449
3450 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3451 {
3452     JSValueOperand value(this, node->child1());
3453     GPRFlushedCallResult result(this);
3454
3455     JSValueRegs valueRegs = value.jsValueRegs();
3456     GPRReg resultGPR = result.gpr();
3457
3458     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3459
3460     m_jit.compare8(JITCompiler::Equal,
3461         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3462         TrustedImm32(RegExpObjectType),
3463         resultGPR);
3464     blessBoolean(resultGPR);
3465     JITCompiler::Jump done = m_jit.jump();
3466
3467     isNotCell.link(&m_jit);
3468     moveFalseTo(resultGPR);
3469
3470     done.link(&m_jit);
3471     blessedBooleanResult(resultGPR, node);
3472 }
3473
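// If the value is already an object it is passed through unchanged; everything else is handled by
// operationObjectConstructor on the slow path.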
3474 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3475 {
3476     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3477     JSValueOperand value(this, node->child1());
3478 #if USE(JSVALUE64)
3479     GPRTemporary result(this, Reuse, value);
3480 #else
3481     GPRTemporary result(this, Reuse, value, PayloadWord);
3482 #endif
3483
3484     JSValueRegs valueRegs = value.jsValueRegs();
3485     GPRReg resultGPR = result.gpr();
3486
3487     MacroAssembler::JumpList slowCases;
3488     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3489     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3490     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3491
3492     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3493     cellResult(resultGPR, node);
3494 }
3495
3496 void SpeculativeJIT::compileArithAdd(Node* node)
3497 {
3498     switch (node->binaryUseKind()) {
3499     case Int32Use: {
3500         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3501
3502         if (node->child2()->isInt32Constant()) {
3503             SpeculateInt32Operand op1(this, node->child1());
3504             GPRTemporary result(this, Reuse, op1);
3505
3506             GPRReg gpr1 = op1.gpr();
3507             int32_t imm2 = node->child2()->asInt32();
3508             GPRReg gprResult = result.gpr();
3509
3510             if (!shouldCheckOverflow(node->arithMode())) {
3511                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3512                 int32Result(gprResult, node);
3513                 return;
3514             }
3515
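            // If the result register aliases the operand, the overflowing add has already clobbered
            // the original value, so record a recovery that lets OSR exit undo the immediate add.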
3516             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3517             if (gpr1 == gprResult) {
3518                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3519                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3520             } else
3521                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3522
3523             int32Result(gprResult, node);
3524             return;
3525         }
3526                 
3527         SpeculateInt32Operand op1(this, node->child1());
3528         SpeculateInt32Operand op2(this, node->child2());
3529         GPRTemporary result(this, Reuse, op1, op2);
3530
3531         GPRReg gpr1 = op1.gpr();
3532         GPRReg gpr2 = op2.gpr();
3533         GPRReg gprResult = result.gpr();
3534
3535         if (!shouldCheckOverflow(node->arithMode()))
3536             m_jit.add32(gpr1, gpr2, gprResult);
3537         else {
3538             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3539                 
3540             if (gpr1 == gprResult)
3541                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3542             else if (gpr2 == gprResult)
3543                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3544             else
3545                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3546         }
3547
3548         int32Result(gprResult, node);
3549         return;
3550     }
3551         
3552 #if USE(JSVALUE64)
3553     case Int52RepUse: {
3554         ASSERT(shouldCheckOverflow(node->arithMode()));
3555         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3556
3557         // Will we need an overflow check? If we can prove that neither input can be Int52
3558         // (i.e. both fit in 32 bits, so their sum cannot overflow 52 bits), no check is needed.
3559         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3560             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3561             SpeculateWhicheverInt52Operand op1(this, node->child1());
3562             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3563             GPRTemporary result(this, Reuse, op1);
3564             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3565             int52Result(result.gpr(), node, op1.format());
3566             return;
3567         }
3568         
3569         SpeculateInt52Operand op1(this, node->child1());
3570         SpeculateInt52Operand op2(this, node->child2());
3571         GPRTemporary result(this);
3572         m_jit.move(op1.gpr(), result.gpr());
3573         speculationCheck(
3574             Int52Overflow, JSValueRegs(), 0,
3575             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3576         int52Result(result.gpr(), node);
3577         return;
3578     }
3579 #endif // USE(JSVALUE64)
3580     
3581     case DoubleRepUse: {
3582         SpeculateDoubleOperand op1(this, node->child1());
3583         SpeculateDoubleOperand op2(this, node->child2());
3584         FPRTemporary result(this, op1, op2);
3585
3586         FPRReg reg1 = op1.fpr();
3587         FPRReg reg2 = op2.fpr();
3588         m_jit.addDouble(reg1, reg2, result.fpr());
3589
3590         doubleResult(result.fpr(), node);
3591         return;
3592     }
3593         
3594     default:
3595         RELEASE_ASSERT_NOT_REACHED();
3596         break;
3597     }
3598 }
3599
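// Inline-allocates a JSRopeString and stores its two or three fibers. The flags (8-bit-ness) and
// length are derived from the operands, with a speculation check if the combined length overflows;
// allocation failure falls back to operationMakeRope2/3.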
3600 void SpeculativeJIT::compileMakeRope(Node* node)
3601 {
3602     ASSERT(node->child1().useKind() == KnownStringUse);
3603     ASSERT(node->child2().useKind() == KnownStringUse);
3604     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3605     
3606     SpeculateCellOperand op1(this, node->child1());
3607     SpeculateCellOperand op2(this, node->child2());
3608     SpeculateCellOperand op3(this, node->child3());
3609     GPRTemporary result(this);
3610     GPRTemporary allocator(this);
3611     GPRTemporary scratch(this);
3612     
3613     GPRReg opGPRs[3];
3614     unsigned numOpGPRs;
3615     opGPRs[0] = op1.gpr();
3616     opGPRs[1] = op2.gpr();
3617     if (node->child3()) {
3618         opGPRs[2] = op3.gpr();
3619         numOpGPRs = 3;
3620     } else {
3621         opGPRs[2] = InvalidGPRReg;
3622         numOpGPRs = 2;
3623     }
3624     GPRReg resultGPR = result.gpr();
3625     GPRReg allocatorGPR = allocator.gpr();
3626     GPRReg scratchGPR = scratch.gpr();
3627     
3628     JITCompiler::JumpList slowPath;
3629     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3630     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3631     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3632         
3633     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3634     for (unsigned i = 0; i < numOpGPRs; ++i)
3635         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3636     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3637         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3638     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3639     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3640     if (!ASSERT_DISABLED) {
3641         JITCompiler::Jump ok = m_jit.branch32(
3642             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3643         m_jit.abortWithReason(DFGNegativeStringLength);
3644         ok.link(&m_jit);
3645     }
3646     for (unsigned i = 1; i < numOpGPRs; ++i) {
3647         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3648         speculationCheck(
3649             Uncountable, JSValueSource(), nullptr,
3650             m_jit.branchAdd32(
3651                 JITCompiler::Overflow,
3652                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3653     }
3654     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3655     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3656     if (!ASSERT_DISABLED) {
3657         JITCompiler::Jump ok = m_jit.branch32(
3658             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3659         m_jit.abortWithReason(DFGNegativeStringLength);
3660         ok.link(&m_jit);
3661     }
3662     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3663     
3664     switch (numOpGPRs) {
3665     case 2:
3666         addSlowPathGenerator(slowPathCall(
3667             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3668         break;
3669     case 3:
3670         addSlowPathGenerator(slowPathCall(
3671             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3672         break;
3673     default:
3674         RELEASE_ASSERT_NOT_REACHED();
3675         break;
3676     }
3677         
3678     cellResult(resultGPR, node);
3679 }
3680
3681 void SpeculativeJIT::compileArithClz32(Node* node)
3682 {
3683     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
3684     SpeculateInt32Operand value(this, node->child1());
3685     GPRTemporary result(this, Reuse, value);
3686     GPRReg valueReg = value.gpr();
3687     GPRReg resultReg = result.gpr();
3688     m_jit.countLeadingZeros32(valueReg, resultReg);
3689     int32Result(resultReg, node);
3690 }
3691
3692 void SpeculativeJIT::compileArithSub(Node* node)
3693 {
3694     switch (node->binaryUseKind()) {
3695     case Int32Use: {
3696         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3697         
3698         if (node->child2()->isInt32Constant()) {
3699             SpeculateInt32Operand op1(this, node->child1());
3700             int32_t imm2 = node->child2()->asInt32();
3701             GPRTemporary result(this);
3702
3703             if (!shouldCheckOverflow(node->arithMode())) {
3704                 m_jit.move(op1.gpr(), result.gpr());
3705                 m_jit.sub32(Imm32(imm2), result.gpr());
3706             } else {
3707                 GPRTemporary scratch(this);
3708                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3709             }
3710
3711             int32Result(result.gpr(), node);
3712             return;
3713         }
3714             
3715         if (node->child1()->isInt32Constant()) {
3716             int32_t imm1 = node->child1()->asInt32();
3717             SpeculateInt32Operand op2(this, node->child2());
3718             GPRTemporary result(this);
3719                 
3720             m_jit.move(Imm32(imm1), result.gpr());
3721             if (!shouldCheckOverflow(node->arithMode()))
3722                 m_jit.sub32(op2.gpr(), result.gpr());
3723             else
3724                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3725                 
3726             int32Result(result.gpr(), node);
3727             return;
3728         }
3729             
3730         SpeculateInt32Operand op1(this, node->child1());
3731         SpeculateInt32Operand op2(this, node->child2());
3732         GPRTemporary result(this);
3733
3734         if (!shouldCheckOverflow(node->arithMode())) {
3735             m_jit.move(op1.gpr(), result.gpr());
3736             m_jit.sub32(op2.gpr(), result.gpr());
3737         } else
3738             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3739
3740         int32Result(result.gpr(), node);
3741         return;
3742     }
3743         
3744 #if USE(JSVALUE64)
3745     case Int52RepUse: {
3746         ASSERT(shouldCheckOverflow(node->arithMode()));
3747         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3748
3749         // Will we need an overflow check? If we can prove that neither input can be Int52
3750         // (i.e. both fit in 32 bits, so their difference cannot overflow 52 bits), no check is needed.
3751         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3752             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3753             SpeculateWhicheverInt52Operand op1(this, node->child1());
3754             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3755             GPRTemporary result(this, Reuse, op1);
3756             m_jit.move(op1.gpr(), result.gpr());
3757             m_jit.sub64(op2.gpr(), result.gpr());
3758             int52Result(result.gpr(), node, op1.format());
3759             return;
3760         }
3761         
3762         SpeculateInt52Operand op1(this, node->child1());
3763         SpeculateInt52Operand op2(this, node->child2());
3764         GPRTemporary result(this);
3765         m_jit.move(op1.gpr(), result.gpr());
3766         speculationCheck(
3767             Int52Overflow, JSValueRegs(), 0,
3768             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3769         int52Result(result.gpr(), node);
3770         return;
3771     }
3772 #endif // USE(JSVALUE64)
3773
3774     case DoubleRepUse: {
3775         SpeculateDoubleOperand op1(this, node->child1());
3776         SpeculateDoubleOperand op2(this, node->child2());
3777         FPRTemporary result(this, op1);
3778
3779         FPRReg reg1 = op1.fpr();
3780         FPRReg reg2 = op2.fpr();
3781         m_jit.subDouble(reg1, reg2, result.fpr());
3782
3783         doubleResult(result.fpr(), node);
3784         return;
3785     }
3786
3787     case UntypedUse: {
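        // Untyped subtraction: JITSubGenerator emits the inline fast path and operationValueSub
        // handles the slow path.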
3788         Edge& leftChild = node->child1();
3789         Edge& rightChild = node->child2();
3790
3791         JSValueOperand left(this, leftChild);
3792         JSValueOperand right(this, rightChild);
3793
3794         JSValueRegs leftRegs = left.jsValueRegs();
3795         JSValueRegs rightRegs = right.jsValueRegs();
3796
3797         FPRTemporary leftNumber(this);
3798         FPRTemporary rightNumber(this);
3799         FPRReg leftFPR = leftNumber.fpr();
3800         FPRReg rightFPR = rightNumber.fpr();
3801
3802 #if USE(JSVALUE64)
3803         GPRTemporary result(this);
3804         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3805         GPRTemporary scratch(this);
3806         GPRReg scratchGPR = scratch.gpr();
3807         FPRReg scratchFPR = InvalidFPRReg;
3808 #else
3809         GPRTemporary resultTag(this);
3810         GPRTemporary resultPayload(this);
3811         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3812         GPRReg scratchGPR = resultTag.gpr();
3813         FPRTemporary fprScratch(this);
3814         FPRReg scratchFPR = fprScratch.fpr();
3815 #endif
3816
3817         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3818         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3819
3820         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3821             leftFPR, rightFPR, scratchGPR, scratchFPR);
3822         gen.generateFastPath(m_jit);
3823
3824         ASSERT(gen.didEmitFastPath());
3825         gen.endJumpList().append(m_jit.jump());
3826
3827         gen.slowPathJumpList().link(&m_jit);
3828         silentSpillAllRegisters(resultRegs);
3829         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3830         silentFillAllRegisters(resultRegs);
3831         m_jit.exceptionCheck();
3832
3833         gen.endJumpList().link(&m_jit);
3834         jsValueResult(resultRegs, node);
3835         return;
3836     }
3837
3838     default:
3839         RELEASE_ASSERT_NOT_REACHED();
3840         return;
3841     }
3842 }
3843
3844 void SpeculativeJIT::compileArithNegate(Node* node)
3845 {
3846     switch (node->child1().useKind()) {
3847     case Int32Use: {
3848         SpeculateInt32Operand op1(this, node->child1());
3849         GPRTemporary result(this);
3850
3851         m_jit.move(op1.gpr(), result.gpr());
3852
3853         // Note: there is no arith mode in which overflow goes unchecked (the result is not
3854         // really used as a number) but someone still cares about negative zero.
3855         
3856         if (!shouldCheckOverflow(node->arithMode()))
3857             m_jit.neg32(result.gpr());
3858         else if (!shouldCheckNegativeZero(node->arithMode()))
3859             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3860         else {
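            // result.gpr() still holds the operand here. If its low 31 bits are all zero, the value
            // is either 0 (negation would produce -0) or INT32_MIN (negation would overflow), so exit.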
3861             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3862             m_jit.neg32(result.gpr());
3863         }
3864
3865         int32Result(result.gpr(), node);
3866         return;
3867     }
3868
3869 #if USE(JSVALUE64)
3870     case Int52RepUse: {
3871         ASSERT(shouldCheckOverflow(node->arithMode()));
3872         
3873         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3874             SpeculateWhicheverInt52Operand op1(this, node->child1());
3875             GPRTemporary result(this);
3876             GPRReg op1GPR = op1.gpr();
3877             GPRReg resultGPR = result.gpr();
3878             m_jit.move(op1GPR, resultGPR);
3879             m_jit.neg64(resultGPR);
3880             if (shouldCheckNegativeZero(node->arithMode())) {
3881                 speculationCheck(
3882                     NegativeZero, JSValueRegs(), 0,
3883                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3884             }
3885             int52Result(resultGPR, node, op1.format());
3886