Replace WTF::move with WTFMove
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSArrowFunction.h"
51 #include "JSCInlines.h"
52 #include "JSEnvironmentRecord.h"
53 #include "JSGeneratorFunction.h"
54 #include "JSLexicalEnvironment.h"
55 #include "LinkBuffer.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
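// Inline (fast-path) allocation of a JSArray with the given structure: allocates the
// butterfly storage and the array cell, initializes the public and vector lengths,
// hole-fills trailing slots of double arrays with PNaN, and falls back to
// operationNewArrayWithSize via the slow path generator if either allocation fails.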
81 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
82 {
83     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
84     
85     GPRTemporary scratch(this);
86     GPRTemporary scratch2(this);
87     GPRReg scratchGPR = scratch.gpr();
88     GPRReg scratch2GPR = scratch2.gpr();
89     
90     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
91     
92     JITCompiler::JumpList slowCases;
93     
94     slowCases.append(
95         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
96     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
97     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
98     
99     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
100     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
101     
102     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
103 #if USE(JSVALUE64)
104         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
105         for (unsigned i = numElements; i < vectorLength; ++i)
106             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
107 #else
108         EncodedValueDescriptor value;
109         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
110         for (unsigned i = numElements; i < vectorLength; ++i) {
111             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
112             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
113         }
114 #endif
115     }
116     
117     // I want a slow path that also loads out the storage pointer, and that's
118     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
119     // of work for a very small piece of functionality. :-/
120     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
121         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
122         structure, numElements));
123 }
124
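// Loads the argument count (optionally excluding |this|) into lengthGPR. For a known-arity
// inlined frame the count is a compile-time constant; otherwise it is loaded from the
// frame's ArgumentCount slot.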
125 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
126 {
127     if (inlineCallFrame && !inlineCallFrame->isVarargs())
128         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
129     else {
130         VirtualRegister argumentCountRegister;
131         if (!inlineCallFrame)
132             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
133         else
134             argumentCountRegister = inlineCallFrame->argumentCountRegister;
135         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
136         if (!includeThis)
137             m_jit.sub32(TrustedImm32(1), lengthGPR);
138     }
139 }
140
141 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
142 {
143     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
144 }
145
146 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
147 {
148     if (origin.inlineCallFrame) {
149         if (origin.inlineCallFrame->isClosureCall) {
150             m_jit.loadPtr(
151                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
152                 calleeGPR);
153         } else {
154             m_jit.move(
155                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
156                 calleeGPR);
157         }
158     } else
159         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
160 }
161
162 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
163 {
164     m_jit.addPtr(
165         TrustedImm32(
166             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
167         GPRInfo::callFrameRegister, startGPR);
168 }
169
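// When OSR exit fuzzing is enabled, increments the global fuzz counter and returns a jump
// that fires once the configured threshold is reached; returns an unset jump otherwise.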
170 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
171 {
172     if (!doOSRExitFuzzing())
173         return MacroAssembler::Jump();
174     
175     MacroAssembler::Jump result;
176     
177     m_jit.pushToSave(GPRInfo::regT0);
178     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
179     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
180     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
181     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
182     unsigned at = Options::fireOSRExitFuzzAt();
183     if (at || atOrAfter) {
184         unsigned threshold;
185         MacroAssembler::RelationalCondition condition;
186         if (atOrAfter) {
187             threshold = atOrAfter;
188             condition = MacroAssembler::Below;
189         } else {
190             threshold = at;
191             condition = MacroAssembler::NotEqual;
192         }
193         MacroAssembler::Jump ok = m_jit.branch32(
194             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
195         m_jit.popToRestore(GPRInfo::regT0);
196         result = m_jit.jump();
197         ok.link(&m_jit);
198     }
199     m_jit.popToRestore(GPRInfo::regT0);
200     
201     return result;
202 }
203
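// The speculationCheck() overloads below register an OSR exit: the given jump(s) become the
// exit trigger, and an OSRExit entry capturing the current variable event stream position is
// appended to the JIT code.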
204 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
205 {
206     if (!m_compileOkay)
207         return;
208     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
209     if (fuzzJump.isSet()) {
210         JITCompiler::JumpList jumpsToFail;
211         jumpsToFail.append(fuzzJump);
212         jumpsToFail.append(jumpToFail);
213         m_jit.appendExitInfo(jumpsToFail);
214     } else
215         m_jit.appendExitInfo(jumpToFail);
216     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
217 }
218
219 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
220 {
221     if (!m_compileOkay)
222         return;
223     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
224     if (fuzzJump.isSet()) {
225         JITCompiler::JumpList myJumpsToFail;
226         myJumpsToFail.append(jumpsToFail);
227         myJumpsToFail.append(fuzzJump);
228         m_jit.appendExitInfo(myJumpsToFail);
229     } else
230         m_jit.appendExitInfo(jumpsToFail);
231     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
232 }
233
234 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
235 {
236     if (!m_compileOkay)
237         return OSRExitJumpPlaceholder();
238     unsigned index = m_jit.jitCode()->osrExit.size();
239     m_jit.appendExitInfo();
240     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
241     return OSRExitJumpPlaceholder(index);
242 }
243
244 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
245 {
246     return speculationCheck(kind, jsValueSource, nodeUse.node());
247 }
248
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
250 {
251     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
252 }
253
254 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
255 {
256     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
257 }
258
259 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
260 {
261     if (!m_compileOkay)
262         return;
263     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
264     m_jit.appendExitInfo(jumpToFail);
265     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
266 }
267
268 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
269 {
270     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
271 }
272
273 void SpeculativeJIT::emitInvalidationPoint(Node* node)
274 {
275     if (!m_compileOkay)
276         return;
277     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
278     m_jit.jitCode()->appendOSRExit(OSRExit(
279         UncountableInvalidation, JSValueSource(),
280         m_jit.graph().methodOfGettingAValueProfileFor(node),
281         this, m_stream->size()));
282     info.m_replacementSource = m_jit.watchpointLabel();
283     ASSERT(info.m_replacementSource.isSet());
284     noResult(node);
285 }
286
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
288 {
289     if (!m_compileOkay)
290         return;
291     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
292     m_compileOkay = false;
293     if (verboseCompilationEnabled())
294         dataLog("Bailing compilation.\n");
295 }
296
297 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
298 {
299     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
300 }
301
302 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
303 {
304     ASSERT(needsTypeCheck(edge, typesPassedThrough));
305     m_interpreter.filter(edge, typesPassedThrough);
306     speculationCheck(BadType, source, edge.node(), jumpToFail);
307 }
308
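// Returns the set of registers currently holding live values, merged with the registers
// that inline cache stubs are not allowed to use.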
309 RegisterSet SpeculativeJIT::usedRegisters()
310 {
311     RegisterSet result;
312     
313     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
314         GPRReg gpr = GPRInfo::toRegister(i);
315         if (m_gprs.isInUse(gpr))
316             result.set(gpr);
317     }
318     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
319         FPRReg fpr = FPRInfo::toRegister(i);
320         if (m_fprs.isInUse(fpr))
321             result.set(fpr);
322     }
323     
324     result.merge(RegisterSet::stubUnavailableRegisters());
325     
326     return result;
327 }
328
329 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
330 {
331     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
332 }
333
334 void SpeculativeJIT::runSlowPathGenerators()
335 {
336     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
337         m_slowPathGenerators[i]->generate(this);
338 }
339
340 // On Windows we need to wrap fmod; on other platforms we can call it directly.
341 // On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
342 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
343 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
344 {
345     return fmod(x, y);
346 }
347 #else
348 #define fmodAsDFGOperation fmod
349 #endif
350
351 void SpeculativeJIT::clearGenerationInfo()
352 {
353     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
354         m_generationInfo[i] = GenerationInfo();
355     m_gprs = RegisterBank<GPRInfo>();
356     m_fprs = RegisterBank<FPRInfo>();
357 }
358
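// Silent spill/fill plans describe how to preserve a live register across a call without
// changing the recorded generation info: the plan captures the store needed to spill the
// value (if any) and the cheapest way to refill it (reload, rebox, or rematerialize a
// constant).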
359 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
360 {
361     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
362     Node* node = info.node();
363     DataFormat registerFormat = info.registerFormat();
364     ASSERT(registerFormat != DataFormatNone);
365     ASSERT(registerFormat != DataFormatDouble);
366         
367     SilentSpillAction spillAction;
368     SilentFillAction fillAction;
369         
370     if (!info.needsSpill())
371         spillAction = DoNothingForSpill;
372     else {
373 #if USE(JSVALUE64)
374         ASSERT(info.gpr() == source);
375         if (registerFormat == DataFormatInt32)
376             spillAction = Store32Payload;
377         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
378             spillAction = StorePtr;
379         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
380             spillAction = Store64;
381         else {
382             ASSERT(registerFormat & DataFormatJS);
383             spillAction = Store64;
384         }
385 #elif USE(JSVALUE32_64)
386         if (registerFormat & DataFormatJS) {
387             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
388             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
389         } else {
390             ASSERT(info.gpr() == source);
391             spillAction = Store32Payload;
392         }
393 #endif
394     }
395         
396     if (registerFormat == DataFormatInt32) {
397         ASSERT(info.gpr() == source);
398         ASSERT(isJSInt32(info.registerFormat()));
399         if (node->hasConstant()) {
400             ASSERT(node->isInt32Constant());
401             fillAction = SetInt32Constant;
402         } else
403             fillAction = Load32Payload;
404     } else if (registerFormat == DataFormatBoolean) {
405 #if USE(JSVALUE64)
406         RELEASE_ASSERT_NOT_REACHED();
407 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
408         fillAction = DoNothingForFill;
409 #endif
410 #elif USE(JSVALUE32_64)
411         ASSERT(info.gpr() == source);
412         if (node->hasConstant()) {
413             ASSERT(node->isBooleanConstant());
414             fillAction = SetBooleanConstant;
415         } else
416             fillAction = Load32Payload;
417 #endif
418     } else if (registerFormat == DataFormatCell) {
419         ASSERT(info.gpr() == source);
420         if (node->hasConstant()) {
421             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
422             node->asCell(); // To get the assertion.
423             fillAction = SetCellConstant;
424         } else {
425 #if USE(JSVALUE64)
426             fillAction = LoadPtr;
427 #else
428             fillAction = Load32Payload;
429 #endif
430         }
431     } else if (registerFormat == DataFormatStorage) {
432         ASSERT(info.gpr() == source);
433         fillAction = LoadPtr;
434     } else if (registerFormat == DataFormatInt52) {
435         if (node->hasConstant())
436             fillAction = SetInt52Constant;
437         else if (info.spillFormat() == DataFormatInt52)
438             fillAction = Load64;
439         else if (info.spillFormat() == DataFormatStrictInt52)
440             fillAction = Load64ShiftInt52Left;
441         else if (info.spillFormat() == DataFormatNone)
442             fillAction = Load64;
443         else {
444             RELEASE_ASSERT_NOT_REACHED();
445 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
446             fillAction = Load64; // Make GCC happy.
447 #endif
448         }
449     } else if (registerFormat == DataFormatStrictInt52) {
450         if (node->hasConstant())
451             fillAction = SetStrictInt52Constant;
452         else if (info.spillFormat() == DataFormatInt52)
453             fillAction = Load64ShiftInt52Right;
454         else if (info.spillFormat() == DataFormatStrictInt52)
455             fillAction = Load64;
456         else if (info.spillFormat() == DataFormatNone)
457             fillAction = Load64;
458         else {
459             RELEASE_ASSERT_NOT_REACHED();
460 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
461             fillAction = Load64; // Make GCC happy.
462 #endif
463         }
464     } else {
465         ASSERT(registerFormat & DataFormatJS);
466 #if USE(JSVALUE64)
467         ASSERT(info.gpr() == source);
468         if (node->hasConstant()) {
469             if (node->isCellConstant())
470                 fillAction = SetTrustedJSConstant;
471             else
472                 fillAction = SetJSConstant;
473         } else if (info.spillFormat() == DataFormatInt32) {
474             ASSERT(registerFormat == DataFormatJSInt32);
475             fillAction = Load32PayloadBoxInt;
476         } else
477             fillAction = Load64;
478 #else
479         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
480         if (node->hasConstant())
481             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
482         else if (info.payloadGPR() == source)
483             fillAction = Load32Payload;
484         else { // Fill the Tag
485             switch (info.spillFormat()) {
486             case DataFormatInt32:
487                 ASSERT(registerFormat == DataFormatJSInt32);
488                 fillAction = SetInt32Tag;
489                 break;
490             case DataFormatCell:
491                 ASSERT(registerFormat == DataFormatJSCell);
492                 fillAction = SetCellTag;
493                 break;
494             case DataFormatBoolean:
495                 ASSERT(registerFormat == DataFormatJSBoolean);
496                 fillAction = SetBooleanTag;
497                 break;
498             default:
499                 fillAction = Load32Tag;
500                 break;
501             }
502         }
503 #endif
504     }
505         
506     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
507 }
508     
509 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
510 {
511     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
512     Node* node = info.node();
513     ASSERT(info.registerFormat() == DataFormatDouble);
514
515     SilentSpillAction spillAction;
516     SilentFillAction fillAction;
517         
518     if (!info.needsSpill())
519         spillAction = DoNothingForSpill;
520     else {
521         ASSERT(!node->hasConstant());
522         ASSERT(info.spillFormat() == DataFormatNone);
523         ASSERT(info.fpr() == source);
524         spillAction = StoreDouble;
525     }
526         
527 #if USE(JSVALUE64)
528     if (node->hasConstant()) {
529         node->asNumber(); // To get the assertion.
530         fillAction = SetDoubleConstant;
531     } else {
532         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
533         fillAction = LoadDouble;
534     }
535 #elif USE(JSVALUE32_64)
536     ASSERT(info.registerFormat() == DataFormatDouble);
537     if (node->hasConstant()) {
538         node->asNumber(); // To get the assertion.
539         fillAction = SetDoubleConstant;
540     } else
541         fillAction = LoadDouble;
542 #endif
543
544     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
545 }
546     
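// silentSpill() and silentFill() execute the two halves of a plan computed above; the fill
// may use 'canTrample' as a scratch GPR when materializing a double constant on 64-bit.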
547 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
548 {
549     switch (plan.spillAction()) {
550     case DoNothingForSpill:
551         break;
552     case Store32Tag:
553         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
554         break;
555     case Store32Payload:
556         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
557         break;
558     case StorePtr:
559         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
560         break;
561 #if USE(JSVALUE64)
562     case Store64:
563         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
564         break;
565 #endif
566     case StoreDouble:
567         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
568         break;
569     default:
570         RELEASE_ASSERT_NOT_REACHED();
571     }
572 }
573     
574 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
575 {
576 #if USE(JSVALUE32_64)
577     UNUSED_PARAM(canTrample);
578 #endif
579     switch (plan.fillAction()) {
580     case DoNothingForFill:
581         break;
582     case SetInt32Constant:
583         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
584         break;
585 #if USE(JSVALUE64)
586     case SetInt52Constant:
587         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
588         break;
589     case SetStrictInt52Constant:
590         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
591         break;
592 #endif // USE(JSVALUE64)
593     case SetBooleanConstant:
594         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
595         break;
596     case SetCellConstant:
597         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
598         break;
599 #if USE(JSVALUE64)
600     case SetTrustedJSConstant:
601         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
602         break;
603     case SetJSConstant:
604         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
605         break;
606     case SetDoubleConstant:
607         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
608         m_jit.move64ToDouble(canTrample, plan.fpr());
609         break;
610     case Load32PayloadBoxInt:
611         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
612         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
613         break;
614     case Load32PayloadConvertToInt52:
615         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
616         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
617         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
618         break;
619     case Load32PayloadSignExtend:
620         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
621         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
622         break;
623 #else
624     case SetJSConstantTag:
625         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
626         break;
627     case SetJSConstantPayload:
628         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
629         break;
630     case SetInt32Tag:
631         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
632         break;
633     case SetCellTag:
634         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
635         break;
636     case SetBooleanTag:
637         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
638         break;
639     case SetDoubleConstant:
640         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
641         break;
642 #endif
643     case Load32Tag:
644         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
645         break;
646     case Load32Payload:
647         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
648         break;
649     case LoadPtr:
650         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
651         break;
652 #if USE(JSVALUE64)
653     case Load64:
654         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
655         break;
656     case Load64ShiftInt52Right:
657         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
658         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
659         break;
660     case Load64ShiftInt52Left:
661         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
662         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
663         break;
664 #endif
665     case LoadDouble:
666         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
667         break;
668     default:
669         RELEASE_ASSERT_NOT_REACHED();
670     }
671 }
672     
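// Emits the indexing-type check for an ArrayMode. The caller loads the cell's indexing type
// byte into tempGPR; the returned jump(s) are taken when the shape does not match what the
// array mode expects.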
673 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
674 {
675     switch (arrayMode.arrayClass()) {
676     case Array::OriginalArray: {
677         CRASH();
678 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
679         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
680         return result;
681 #endif
682     }
683         
684     case Array::Array:
685         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
686         return m_jit.branch32(
687             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
688         
689     case Array::NonArray:
690     case Array::OriginalNonArray:
691         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
692         return m_jit.branch32(
693             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
694         
695     case Array::PossiblyArray:
696         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
697         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
698     }
699     
700     RELEASE_ASSERT_NOT_REACHED();
701     return JITCompiler::Jump();
702 }
703
704 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
705 {
706     JITCompiler::JumpList result;
707     
708     switch (arrayMode.type()) {
709     case Array::Int32:
710         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
711
712     case Array::Double:
713         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
714
715     case Array::Contiguous:
716         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
717
718     case Array::Undecided:
719         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
720
721     case Array::ArrayStorage:
722     case Array::SlowPutArrayStorage: {
723         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
724         
725         if (arrayMode.isJSArray()) {
726             if (arrayMode.isSlowPut()) {
727                 result.append(
728                     m_jit.branchTest32(
729                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
730                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
731                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
732                 result.append(
733                     m_jit.branch32(
734                         MacroAssembler::Above, tempGPR,
735                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
736                 break;
737             }
738             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
739             result.append(
740                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
741             break;
742         }
743         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
744         if (arrayMode.isSlowPut()) {
745             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
746             result.append(
747                 m_jit.branch32(
748                     MacroAssembler::Above, tempGPR,
749                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
750             break;
751         }
752         result.append(
753             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
754         break;
755     }
756     default:
757         CRASH();
758         break;
759     }
760     
761     return result;
762 }
763
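// CheckArray: speculates that the base cell matches the node's array mode, either by checking
// the indexing shape or, for typed arrays and arguments objects, the cell's JSType, and OSR
// exits on mismatch.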
764 void SpeculativeJIT::checkArray(Node* node)
765 {
766     ASSERT(node->arrayMode().isSpecific());
767     ASSERT(!node->arrayMode().doesConversion());
768     
769     SpeculateCellOperand base(this, node->child1());
770     GPRReg baseReg = base.gpr();
771     
772     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
773         noResult(m_currentNode);
774         return;
775     }
776     
777     const ClassInfo* expectedClassInfo = 0;
778     
779     switch (node->arrayMode().type()) {
780     case Array::AnyTypedArray:
781     case Array::String:
782         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
783         break;
784     case Array::Int32:
785     case Array::Double:
786     case Array::Contiguous:
787     case Array::Undecided:
788     case Array::ArrayStorage:
789     case Array::SlowPutArrayStorage: {
790         GPRTemporary temp(this);
791         GPRReg tempGPR = temp.gpr();
792         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
793         speculationCheck(
794             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
795             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
796         
797         noResult(m_currentNode);
798         return;
799     }
800     case Array::DirectArguments:
801         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
802         noResult(m_currentNode);
803         return;
804     case Array::ScopedArguments:
805         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
806         noResult(m_currentNode);
807         return;
808     default:
809         speculateCellTypeWithoutTypeFiltering(
810             node->child1(), baseReg,
811             typeForTypedArrayType(node->arrayMode().typedArrayType()));
812         noResult(m_currentNode);
813         return;
814     }
815     
816     RELEASE_ASSERT(expectedClassInfo);
817     
818     GPRTemporary temp(this);
819     GPRTemporary temp2(this);
820     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
821     speculationCheck(
822         BadType, JSValueSource::unboxedCell(baseReg), node,
823         m_jit.branchPtr(
824             MacroAssembler::NotEqual,
825             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
826             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
827     
828     noResult(m_currentNode);
829 }
830
831 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
832 {
833     ASSERT(node->arrayMode().doesConversion());
834     
835     GPRTemporary temp(this);
836     GPRTemporary structure;
837     GPRReg tempGPR = temp.gpr();
838     GPRReg structureGPR = InvalidGPRReg;
839     
840     if (node->op() != ArrayifyToStructure) {
841         GPRTemporary realStructure(this);
842         structure.adopt(realStructure);
843         structureGPR = structure.gpr();
844     }
845         
846     // We can skip all that comes next if we already have array storage.
847     MacroAssembler::JumpList slowPath;
848     
849     if (node->op() == ArrayifyToStructure) {
850         slowPath.append(m_jit.branchWeakStructure(
851             JITCompiler::NotEqual,
852             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
853             node->structure()));
854     } else {
855         m_jit.load8(
856             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
857         
858         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
859     }
860     
861     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
862         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
863     
864     noResult(m_currentNode);
865 }
866
867 void SpeculativeJIT::arrayify(Node* node)
868 {
869     ASSERT(node->arrayMode().isSpecific());
870     
871     SpeculateCellOperand base(this, node->child1());
872     
873     if (!node->child2()) {
874         arrayify(node, base.gpr(), InvalidGPRReg);
875         return;
876     }
877     
878     SpeculateInt32Operand property(this, node->child2());
879     
880     arrayify(node, base.gpr(), property.gpr());
881 }
882
883 GPRReg SpeculativeJIT::fillStorage(Edge edge)
884 {
885     VirtualRegister virtualRegister = edge->virtualRegister();
886     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
887     
888     switch (info.registerFormat()) {
889     case DataFormatNone: {
890         if (info.spillFormat() == DataFormatStorage) {
891             GPRReg gpr = allocate();
892             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
893             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
894             info.fillStorage(*m_stream, gpr);
895             return gpr;
896         }
897         
898         // Must be a cell; fill it as a cell and then return the pointer.
899         return fillSpeculateCell(edge);
900     }
901         
902     case DataFormatStorage: {
903         GPRReg gpr = info.gpr();
904         m_gprs.lock(gpr);
905         return gpr;
906     }
907         
908     default:
909         return fillSpeculateCell(edge);
910     }
911 }
912
913 void SpeculativeJIT::useChildren(Node* node)
914 {
915     if (node->flags() & NodeHasVarArgs) {
916         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
917             if (!!m_jit.graph().m_varArgChildren[childIdx])
918                 use(m_jit.graph().m_varArgChildren[childIdx]);
919         }
920     } else {
921         Edge child1 = node->child1();
922         if (!child1) {
923             ASSERT(!node->child2() && !node->child3());
924             return;
925         }
926         use(child1);
927         
928         Edge child2 = node->child2();
929         if (!child2) {
930             ASSERT(!node->child3());
931             return;
932         }
933         use(child2);
934         
935         Edge child3 = node->child3();
936         if (!child3)
937             return;
938         use(child3);
939     }
940 }
941
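// Compiles the 'in' operator. When the property is a constant atomic string, an inline cache
// backed by operationInOptimize is emitted; otherwise the generic operationGenericIn call is
// used.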
942 void SpeculativeJIT::compileIn(Node* node)
943 {
944     SpeculateCellOperand base(this, node->child2());
945     GPRReg baseGPR = base.gpr();
946     
947     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
948         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
949             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
950             
951             GPRTemporary result(this);
952             GPRReg resultGPR = result.gpr();
953
954             use(node->child1());
955             
956             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
957             MacroAssembler::Label done = m_jit.label();
958             
959             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
960             // we can cast it to const AtomicStringImpl* safely.
961             auto slowPath = slowPathCall(
962                 jump.m_jump, this, operationInOptimize,
963                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
964                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
965             
966             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
967             stubInfo->codeOrigin = node->origin.semantic;
968             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
969             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
970 #if USE(JSVALUE32_64)
971             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
972             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
973 #endif
974             stubInfo->patch.usedRegisters = usedRegisters();
975
976             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
977             addSlowPathGenerator(WTFMove(slowPath));
978
979             base.use();
980
981             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
982             return;
983         }
984     }
985
986     JSValueOperand key(this, node->child1());
987     JSValueRegs regs = key.jsValueRegs();
988         
989     GPRFlushedCallResult result(this);
990     GPRReg resultGPR = result.gpr();
991         
992     base.use();
993     key.use();
994         
995     flushRegisters();
996     callOperation(
997         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
998         baseGPR, regs);
999     m_jit.exceptionCheck();
1000     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1001 }
1002
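// Tries to fuse the compare with an immediately following Branch node (peephole); returns
// true if the branch was consumed, false if a standalone boolean result was generated.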
1003 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1004 {
1005     unsigned branchIndexInBlock = detectPeepHoleBranch();
1006     if (branchIndexInBlock != UINT_MAX) {
1007         Node* branchNode = m_block->at(branchIndexInBlock);
1008
1009         ASSERT(node->adjustedRefCount() == 1);
1010         
1011         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1012     
1013         m_indexInBlock = branchIndexInBlock;
1014         m_currentNode = branchNode;
1015         
1016         return true;
1017     }
1018     
1019     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1020     
1021     return false;
1022 }
1023
1024 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1025 {
1026     unsigned branchIndexInBlock = detectPeepHoleBranch();
1027     if (branchIndexInBlock != UINT_MAX) {
1028         Node* branchNode = m_block->at(branchIndexInBlock);
1029
1030         ASSERT(node->adjustedRefCount() == 1);
1031         
1032         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1033     
1034         m_indexInBlock = branchIndexInBlock;
1035         m_currentNode = branchNode;
1036         
1037         return true;
1038     }
1039     
1040     nonSpeculativeNonPeepholeStrictEq(node, invert);
1041     
1042     return false;
1043 }
1044
1045 static const char* dataFormatString(DataFormat format)
1046 {
1047     // These values correspond to the DataFormat enum.
1048     const char* strings[] = {
1049         "[  ]",
1050         "[ i]",
1051         "[ d]",
1052         "[ c]",
1053         "Err!",
1054         "Err!",
1055         "Err!",
1056         "Err!",
1057         "[J ]",
1058         "[Ji]",
1059         "[Jd]",
1060         "[Jc]",
1061         "Err!",
1062         "Err!",
1063         "Err!",
1064         "Err!",
1065     };
1066     return strings[format];
1067 }
1068
1069 void SpeculativeJIT::dump(const char* label)
1070 {
1071     if (label)
1072         dataLogF("<%s>\n", label);
1073
1074     dataLogF("  gprs:\n");
1075     m_gprs.dump();
1076     dataLogF("  fprs:\n");
1077     m_fprs.dump();
1078     dataLogF("  VirtualRegisters:\n");
1079     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1080         GenerationInfo& info = m_generationInfo[i];
1081         if (info.alive())
1082             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1083         else
1084             dataLogF("    % 3d:[__][__]", i);
1085         if (info.registerFormat() == DataFormatDouble)
1086             dataLogF(":fpr%d\n", info.fpr());
1087         else if (info.registerFormat() != DataFormatNone
1088 #if USE(JSVALUE32_64)
1089             && !(info.registerFormat() & DataFormatJS)
1090 #endif
1091             ) {
1092             ASSERT(info.gpr() != InvalidGPRReg);
1093             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1094         } else
1095             dataLogF("\n");
1096     }
1097     if (label)
1098         dataLogF("</%s>\n", label);
1099 }
1100
1101 GPRTemporary::GPRTemporary()
1102     : m_jit(0)
1103     , m_gpr(InvalidGPRReg)
1104 {
1105 }
1106
1107 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1108     : m_jit(jit)
1109     , m_gpr(InvalidGPRReg)
1110 {
1111     m_gpr = m_jit->allocate();
1112 }
1113
1114 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1115     : m_jit(jit)
1116     , m_gpr(InvalidGPRReg)
1117 {
1118     m_gpr = m_jit->allocate(specific);
1119 }
1120
1121 #if USE(JSVALUE32_64)
1122 GPRTemporary::GPRTemporary(
1123     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1124     : m_jit(jit)
1125     , m_gpr(InvalidGPRReg)
1126 {
1127     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1128         m_gpr = m_jit->reuse(op1.gpr(which));
1129     else
1130         m_gpr = m_jit->allocate();
1131 }
1132 #endif // USE(JSVALUE32_64)
1133
1134 JSValueRegsTemporary::JSValueRegsTemporary() { }
1135
1136 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1137 #if USE(JSVALUE64)
1138     : m_gpr(jit)
1139 #else
1140     : m_payloadGPR(jit)
1141     , m_tagGPR(jit)
1142 #endif
1143 {
1144 }
1145
1146 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1147
1148 JSValueRegs JSValueRegsTemporary::regs()
1149 {
1150 #if USE(JSVALUE64)
1151     return JSValueRegs(m_gpr.gpr());
1152 #else
1153     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1154 #endif
1155 }
1156
1157 void GPRTemporary::adopt(GPRTemporary& other)
1158 {
1159     ASSERT(!m_jit);
1160     ASSERT(m_gpr == InvalidGPRReg);
1161     ASSERT(other.m_jit);
1162     ASSERT(other.m_gpr != InvalidGPRReg);
1163     m_jit = other.m_jit;
1164     m_gpr = other.m_gpr;
1165     other.m_jit = 0;
1166     other.m_gpr = InvalidGPRReg;
1167 }
1168
1169 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1170     : m_jit(jit)
1171     , m_fpr(InvalidFPRReg)
1172 {
1173     m_fpr = m_jit->fprAllocate();
1174 }
1175
1176 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1177     : m_jit(jit)
1178     , m_fpr(InvalidFPRReg)
1179 {
1180     if (m_jit->canReuse(op1.node()))
1181         m_fpr = m_jit->reuse(op1.fpr());
1182     else
1183         m_fpr = m_jit->fprAllocate();
1184 }
1185
1186 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1187     : m_jit(jit)
1188     , m_fpr(InvalidFPRReg)
1189 {
1190     if (m_jit->canReuse(op1.node()))
1191         m_fpr = m_jit->reuse(op1.fpr());
1192     else if (m_jit->canReuse(op2.node()))
1193         m_fpr = m_jit->reuse(op2.fpr());
1194     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1195         m_fpr = m_jit->reuse(op1.fpr());
1196     else
1197         m_fpr = m_jit->fprAllocate();
1198 }
1199
1200 #if USE(JSVALUE32_64)
1201 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1202     : m_jit(jit)
1203     , m_fpr(InvalidFPRReg)
1204 {
1205     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1206         m_fpr = m_jit->reuse(op1.fpr());
1207     else
1208         m_fpr = m_jit->fprAllocate();
1209 }
1210 #endif
1211
1212 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1213 {
1214     BasicBlock* taken = branchNode->branchData()->taken.block;
1215     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1216     
1217     SpeculateDoubleOperand op1(this, node->child1());
1218     SpeculateDoubleOperand op2(this, node->child2());
1219     
1220     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1221     jump(notTaken);
1222 }
1223
1224 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1225 {
1226     BasicBlock* taken = branchNode->branchData()->taken.block;
1227     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1228
1229     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1230     
1231     if (taken == nextBlock()) {
1232         condition = MacroAssembler::NotEqual;
1233         BasicBlock* tmp = taken;
1234         taken = notTaken;
1235         notTaken = tmp;
1236     }
1237
1238     SpeculateCellOperand op1(this, node->child1());
1239     SpeculateCellOperand op2(this, node->child2());
1240     
1241     GPRReg op1GPR = op1.gpr();
1242     GPRReg op2GPR = op2.gpr();
1243     
1244     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1245         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1246             speculationCheck(
1247                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1248         }
1249         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1250             speculationCheck(
1251                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1252         }
1253     } else {
1254         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1255             speculationCheck(
1256                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1257                 m_jit.branchIfNotObject(op1GPR));
1258         }
1259         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1260             m_jit.branchTest8(
1261                 MacroAssembler::NonZero, 
1262                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1263                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1264
1265         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1266             speculationCheck(
1267                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1268                 m_jit.branchIfNotObject(op2GPR));
1269         }
1270         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1271             m_jit.branchTest8(
1272                 MacroAssembler::NonZero, 
1273                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1274                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1275     }
1276
1277     branchPtr(condition, op1GPR, op2GPR, taken);
1278     jump(notTaken);
1279 }
1280
1281 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1282 {
1283     BasicBlock* taken = branchNode->branchData()->taken.block;
1284     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1285
1286     // The branch instruction will branch to the taken block.
1287     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1288     if (taken == nextBlock()) {
1289         condition = JITCompiler::invert(condition);
1290         BasicBlock* tmp = taken;
1291         taken = notTaken;
1292         notTaken = tmp;
1293     }
1294
1295     if (node->child1()->isBooleanConstant()) {
1296         bool imm = node->child1()->asBoolean();
1297         SpeculateBooleanOperand op2(this, node->child2());
1298         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1299     } else if (node->child2()->isBooleanConstant()) {
1300         SpeculateBooleanOperand op1(this, node->child1());
1301         bool imm = node->child2()->asBoolean();
1302         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1303     } else {
1304         SpeculateBooleanOperand op1(this, node->child1());
1305         SpeculateBooleanOperand op2(this, node->child2());
1306         branch32(condition, op1.gpr(), op2.gpr(), taken);
1307     }
1308
1309     jump(notTaken);
1310 }
1311
1312 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1313 {
1314     BasicBlock* taken = branchNode->branchData()->taken.block;
1315     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1316
1317     // The branch instruction will branch to the taken block.
1318     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1319     if (taken == nextBlock()) {
1320         condition = JITCompiler::invert(condition);
1321         BasicBlock* tmp = taken;
1322         taken = notTaken;
1323         notTaken = tmp;
1324     }
1325
1326     if (node->child1()->isInt32Constant()) {
1327         int32_t imm = node->child1()->asInt32();
1328         SpeculateInt32Operand op2(this, node->child2());
1329         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1330     } else if (node->child2()->isInt32Constant()) {
1331         SpeculateInt32Operand op1(this, node->child1());
1332         int32_t imm = node->child2()->asInt32();
1333         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1334     } else {
1335         SpeculateInt32Operand op1(this, node->child1());
1336         SpeculateInt32Operand op2(this, node->child2());
1337         branch32(condition, op1.gpr(), op2.gpr(), taken);
1338     }
1339
1340     jump(notTaken);
1341 }
1342
1343 // Returns true if the compare is fused with a subsequent branch.
1344 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1345 {
1346     // Fused compare & branch.
1347     unsigned branchIndexInBlock = detectPeepHoleBranch();
1348     if (branchIndexInBlock != UINT_MAX) {
1349         Node* branchNode = m_block->at(branchIndexInBlock);
1350
1351         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1352         // so there can be no intervening nodes that also reference the compare.
1353         ASSERT(node->adjustedRefCount() == 1);
1354
1355         if (node->isBinaryUseKind(Int32Use))
1356             compilePeepHoleInt32Branch(node, branchNode, condition);
1357 #if USE(JSVALUE64)
1358         else if (node->isBinaryUseKind(Int52RepUse))
1359             compilePeepHoleInt52Branch(node, branchNode, condition);
1360 #endif // USE(JSVALUE64)
1361         else if (node->isBinaryUseKind(DoubleRepUse))
1362             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1363         else if (node->op() == CompareEq) {
1364             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1365                 // Use non-peephole comparison, for now.
1366                 return false;
1367             }
1368             if (node->isBinaryUseKind(BooleanUse))
1369                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1370             else if (node->isBinaryUseKind(SymbolUse))
1371                 compilePeepHoleSymbolEquality(node, branchNode);
1372             else if (node->isBinaryUseKind(ObjectUse))
1373                 compilePeepHoleObjectEquality(node, branchNode);
1374             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1375                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1376             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1377                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1378             else if (!needsTypeCheck(node->child1(), SpecOther))
1379                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1380             else if (!needsTypeCheck(node->child2(), SpecOther))
1381                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1382             else {
1383                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1384                 return true;
1385             }
1386         } else {
1387             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1388             return true;
1389         }
1390
1391         use(node->child1());
1392         use(node->child2());
1393         m_indexInBlock = branchIndexInBlock;
1394         m_currentNode = branchNode;
1395         return true;
1396     }
1397     return false;
1398 }
1399
1400 void SpeculativeJIT::noticeOSRBirth(Node* node)
1401 {
1402     if (!node->hasVirtualRegister())
1403         return;
1404     
1405     VirtualRegister virtualRegister = node->virtualRegister();
1406     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1407     
1408     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1409 }
1410
1411 void SpeculativeJIT::compileMovHint(Node* node)
1412 {
1413     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1414     
1415     Node* child = node->child1().node();
1416     noticeOSRBirth(child);
1417     
1418     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1419 }
1420
1421 void SpeculativeJIT::bail(AbortReason reason)
1422 {
1423     if (verboseCompilationEnabled())
1424         dataLog("Bailing compilation.\n");
1425     m_compileOkay = true;
1426     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1427     clearGenerationInfo();
1428 }
1429
1430 void SpeculativeJIT::compileCurrentBlock()
1431 {
1432     ASSERT(m_compileOkay);
1433     
1434     if (!m_block)
1435         return;
1436     
1437     ASSERT(m_block->isReachable);
1438     
1439     m_jit.blockHeads()[m_block->index] = m_jit.label();
1440
1441     if (!m_block->intersectionOfCFAHasVisited) {
1442         // Don't generate code for basic blocks that are unreachable according to CFA.
1443         // But to be sure that nobody has generated a jump to this block, drop in a
1444         // breakpoint here.
1445         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1446         return;
1447     }
1448
1449     m_stream->appendAndLog(VariableEvent::reset());
1450     
1451     m_jit.jitAssertHasValidCallFrame();
1452     m_jit.jitAssertTagsInPlace();
1453     m_jit.jitAssertArgumentCountSane();
1454
1455     m_state.reset();
1456     m_state.beginBasicBlock(m_block);
1457     
1458     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1459         int operand = m_block->variablesAtHead.operandForIndex(i);
1460         Node* node = m_block->variablesAtHead[i];
1461         if (!node)
1462             continue; // No need to record dead SetLocals.
1463         
1464         VariableAccessData* variable = node->variableAccessData();
1465         DataFormat format;
1466         if (!node->refCount())
1467             continue; // No need to record dead SetLocals.
1468         format = dataFormatFor(variable->flushFormat());
1469         m_stream->appendAndLog(
1470             VariableEvent::setLocal(
1471                 VirtualRegister(operand),
1472                 variable->machineLocal(),
1473                 format));
1474     }
1475
1476     m_origin = NodeOrigin();
1477     
1478     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1479         m_currentNode = m_block->at(m_indexInBlock);
1480         
1481         // We may have hit a contradiction that the CFA was aware of but that the JIT
1482         // didn't cause directly.
1483         if (!m_state.isValid()) {
1484             bail(DFGBailedAtTopOfBlock);
1485             return;
1486         }
1487
1488         m_interpreter.startExecuting();
1489         m_jit.setForNode(m_currentNode);
1490         m_origin = m_currentNode->origin;
1491         if (validationEnabled())
1492             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1493         m_lastGeneratedNode = m_currentNode->op();
1494         
1495         ASSERT(m_currentNode->shouldGenerate());
1496         
1497         if (verboseCompilationEnabled()) {
1498             dataLogF(
1499                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1500                 (int)m_currentNode->index(),
1501                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1502             dataLog("\n");
1503         }
1504
1505         if (Options::validateDFGExceptionHandling() && mayExit(m_jit.graph(), m_currentNode) != DoesNotExit)
1506             m_jit.jitReleaseAssertNoException();
1507
1508         compile(m_currentNode);
1509         
1510         if (belongsInMinifiedGraph(m_currentNode->op()))
1511             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1512         
1513 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1514         m_jit.clearRegisterAllocationOffsets();
1515 #endif
1516         
1517         if (!m_compileOkay) {
1518             bail(DFGBailedAtEndOfNode);
1519             return;
1520         }
1521         
1522         // Make sure that the abstract state is rematerialized for the next node.
1523         m_interpreter.executeEffects(m_indexInBlock);
1524     }
1525     
1526     // Perform the most basic verification that children have been used correctly.
1527     if (!ASSERT_DISABLED) {
1528         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1529             GenerationInfo& info = m_generationInfo[index];
1530             RELEASE_ASSERT(!info.alive());
1531         }
1532     }
1533 }
1534
1535 // If we are making type predictions about our arguments, then
1536 // we need to check that they are correct on function entry.
1537 void SpeculativeJIT::checkArgumentTypes()
1538 {
1539     ASSERT(!m_currentNode);
1540     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1541
1542     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1543         Node* node = m_jit.graph().m_arguments[i];
1544         if (!node) {
1545             // The argument is dead. We don't do any checks for such arguments.
1546             continue;
1547         }
1548         
1549         ASSERT(node->op() == SetArgument);
1550         ASSERT(node->shouldGenerate());
1551
1552         VariableAccessData* variableAccessData = node->variableAccessData();
1553         FlushFormat format = variableAccessData->flushFormat();
1554         
1555         if (format == FlushedJSValue)
1556             continue;
1557         
1558         VirtualRegister virtualRegister = variableAccessData->local();
1559
1560         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1561         
1562 #if USE(JSVALUE64)
1563         switch (format) {
1564         case FlushedInt32: {
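                 // With the JSVALUE64 encoding, a boxed int32 has all TagTypeNumber bits set, so
                 // any value numerically below the tag register cannot be an int32.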
1565             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1566             break;
1567         }
1568         case FlushedBoolean: {
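                 // Booleans are encoded as ValueFalse and ValueFalse | 1 (ValueTrue), so after
                 // xoring with ValueFalse only the low bit may remain set for a boolean; any
                 // other bit set means the value is not a boolean.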
1569             GPRTemporary temp(this);
1570             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1571             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1572             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1573             break;
1574         }
1575         case FlushedCell: {
1576             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1577             break;
1578         }
1579         default:
1580             RELEASE_ASSERT_NOT_REACHED();
1581             break;
1582         }
1583 #else
1584         switch (format) {
1585         case FlushedInt32: {
1586             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1587             break;
1588         }
1589         case FlushedBoolean: {
1590             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1591             break;
1592         }
1593         case FlushedCell: {
1594             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1595             break;
1596         }
1597         default:
1598             RELEASE_ASSERT_NOT_REACHED();
1599             break;
1600         }
1601 #endif
1602     }
1603
1604     m_origin = NodeOrigin();
1605 }
1606
1607 bool SpeculativeJIT::compile()
1608 {
1609     checkArgumentTypes();
1610     
1611     ASSERT(!m_currentNode);
1612     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1613         m_jit.setForBlockIndex(blockIndex);
1614         m_block = m_jit.graph().block(blockIndex);
1615         compileCurrentBlock();
1616     }
1617     linkBranches();
1618     return true;
1619 }
1620
1621 void SpeculativeJIT::createOSREntries()
1622 {
1623     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1624         BasicBlock* block = m_jit.graph().block(blockIndex);
1625         if (!block)
1626             continue;
1627         if (!block->isOSRTarget)
1628             continue;
1629         
1630         // Currently we don't have OSR entry trampolines. We could add them
1631         // here if need be.
1632         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1633     }
1634 }
1635
1636 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1637 {
1638     unsigned osrEntryIndex = 0;
1639     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1640         BasicBlock* block = m_jit.graph().block(blockIndex);
1641         if (!block)
1642             continue;
1643         if (!block->isOSRTarget)
1644             continue;
1645         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1646     }
1647     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1648     
1649     if (verboseCompilationEnabled()) {
1650         DumpContext dumpContext;
1651         dataLog("OSR Entries:\n");
1652         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1653             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1654         if (!dumpContext.isEmpty())
1655             dumpContext.dump(WTF::dataFile());
1656     }
1657 }
1658
1659 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1660 {
1661     Edge child3 = m_jit.graph().varArgChild(node, 2);
1662     Edge child4 = m_jit.graph().varArgChild(node, 3);
1663
1664     ArrayMode arrayMode = node->arrayMode();
1665     
1666     GPRReg baseReg = base.gpr();
1667     GPRReg propertyReg = property.gpr();
1668     
1669     SpeculateDoubleOperand value(this, child3);
1670
1671     FPRReg valueReg = value.fpr();
1672     
1673     DFG_TYPE_CHECK(
1674         JSValueRegs(), child3, SpecFullRealNumber,
1675         m_jit.branchDouble(
1676             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1677     
1678     if (!m_compileOkay)
1679         return;
1680     
1681     StorageOperand storage(this, child4);
1682     GPRReg storageReg = storage.gpr();
1683
1684     if (node->op() == PutByValAlias) {
1685         // Store the value to the array.
1686         GPRReg propertyReg = property.gpr();
1687         FPRReg valueReg = value.fpr();
1688         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1689         
1690         noResult(m_currentNode);
1691         return;
1692     }
1693     
1694     GPRTemporary temporary;
1695     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1696
1697     MacroAssembler::Jump slowCase;
1698     
1699     if (arrayMode.isInBounds()) {
1700         speculationCheck(
1701             OutOfBounds, JSValueRegs(), 0,
1702             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1703     } else {
1704         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1705         
1706         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1707         
1708         if (!arrayMode.isOutOfBounds())
1709             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1710         
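              // The store is past the current public length but within the allocated vector, so
              // bump the public length to index + 1 before falling through to the store below.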
1711         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1712         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1713         
1714         inBounds.link(&m_jit);
1715     }
1716     
1717     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1718
1719     base.use();
1720     property.use();
1721     value.use();
1722     storage.use();
1723     
1724     if (arrayMode.isOutOfBounds()) {
1725         addSlowPathGenerator(
1726             slowPathCall(
1727                 slowCase, this,
1728                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1729                 NoResult, baseReg, propertyReg, valueReg));
1730     }
1731
1732     noResult(m_currentNode, UseChildrenCalledExplicitly);
1733 }
1734
1735 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1736 {
1737     SpeculateCellOperand string(this, node->child1());
1738     SpeculateStrictInt32Operand index(this, node->child2());
1739     StorageOperand storage(this, node->child3());
1740
1741     GPRReg stringReg = string.gpr();
1742     GPRReg indexReg = index.gpr();
1743     GPRReg storageReg = storage.gpr();
1744     
1745     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1746
1747     // unsigned comparison so we can filter out negative indices and indices that are too large
1748     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1749
1750     GPRTemporary scratch(this);
1751     GPRReg scratchReg = scratch.gpr();
1752
1753     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1754
1755     // Load the character into scratchReg
1756     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1757
1758     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1759     JITCompiler::Jump cont8Bit = m_jit.jump();
1760
1761     is16Bit.link(&m_jit);
1762
1763     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1764
1765     cont8Bit.link(&m_jit);
1766
1767     int32Result(scratchReg, m_currentNode);
1768 }
1769
1770 void SpeculativeJIT::compileGetByValOnString(Node* node)
1771 {
1772     SpeculateCellOperand base(this, node->child1());
1773     SpeculateStrictInt32Operand property(this, node->child2());
1774     StorageOperand storage(this, node->child3());
1775     GPRReg baseReg = base.gpr();
1776     GPRReg propertyReg = property.gpr();
1777     GPRReg storageReg = storage.gpr();
1778
1779     GPRTemporary scratch(this);
1780     GPRReg scratchReg = scratch.gpr();
1781 #if USE(JSVALUE32_64)
1782     GPRTemporary resultTag;
1783     GPRReg resultTagReg = InvalidGPRReg;
1784     if (node->arrayMode().isOutOfBounds()) {
1785         GPRTemporary realResultTag(this);
1786         resultTag.adopt(realResultTag);
1787         resultTagReg = resultTag.gpr();
1788     }
1789 #endif
1790
1791     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1792
1793     // unsigned comparison so we can filter out negative indices and indices that are too large
1794     JITCompiler::Jump outOfBounds = m_jit.branch32(
1795         MacroAssembler::AboveOrEqual, propertyReg,
1796         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1797     if (node->arrayMode().isInBounds())
1798         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1799
1800     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1801
1802     // Load the character into scratchReg
1803     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1804
1805     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1806     JITCompiler::Jump cont8Bit = m_jit.jump();
1807
1808     is16Bit.link(&m_jit);
1809
1810     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1811
1812     JITCompiler::Jump bigCharacter =
1813         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1814
1815     // 8 bit string values don't need the isASCII check.
1816     cont8Bit.link(&m_jit);
1817
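         // scratchReg now holds a character code below 0x100 (larger codes took the bigCharacter
         // slow path), so scale it by pointer size (shift by 2 on 32-bit, 3 on 64-bit) to index
         // the VM's single-character string table and load the cached JSString*.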
1818     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1819     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1820     m_jit.loadPtr(scratchReg, scratchReg);
1821
1822     addSlowPathGenerator(
1823         slowPathCall(
1824             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1825
1826     if (node->arrayMode().isOutOfBounds()) {
1827 #if USE(JSVALUE32_64)
1828         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1829 #endif
1830
1831         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1832         if (globalObject->stringPrototypeChainIsSane()) {
1833             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1834             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1835             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1836             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1837             // indexed properties either.
1838             // https://bugs.webkit.org/show_bug.cgi?id=144668
1839             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1840             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1841             
1842 #if USE(JSVALUE64)
1843             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1844                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1845 #else
1846             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1847                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1848                 baseReg, propertyReg));
1849 #endif
1850         } else {
1851 #if USE(JSVALUE64)
1852             addSlowPathGenerator(
1853                 slowPathCall(
1854                     outOfBounds, this, operationGetByValStringInt,
1855                     scratchReg, baseReg, propertyReg));
1856 #else
1857             addSlowPathGenerator(
1858                 slowPathCall(
1859                     outOfBounds, this, operationGetByValStringInt,
1860                     resultTagReg, scratchReg, baseReg, propertyReg));
1861 #endif
1862         }
1863         
1864 #if USE(JSVALUE64)
1865         jsValueResult(scratchReg, m_currentNode);
1866 #else
1867         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1868 #endif
1869     } else
1870         cellResult(scratchReg, m_currentNode);
1871 }
1872
1873 void SpeculativeJIT::compileFromCharCode(Node* node)
1874 {
1875     SpeculateStrictInt32Operand property(this, node->child1());
1876     GPRReg propertyReg = property.gpr();
1877     GPRTemporary smallStrings(this);
1878     GPRTemporary scratch(this);
1879     GPRReg scratchReg = scratch.gpr();
1880     GPRReg smallStringsReg = smallStrings.gpr();
1881
1882     JITCompiler::JumpList slowCases;
1883     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1884     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1885     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1886
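         // A null table entry (the single-character strings are created lazily) falls back to
         // the slow path, which also handles the char codes rejected by the range check above.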
1887     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1888     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1889     cellResult(scratchReg, m_currentNode);
1890 }
1891
1892 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1893 {
1894     VirtualRegister virtualRegister = node->virtualRegister();
1895     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1896
1897     switch (info.registerFormat()) {
1898     case DataFormatStorage:
1899         RELEASE_ASSERT_NOT_REACHED();
1900
1901     case DataFormatBoolean:
1902     case DataFormatCell:
1903         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1904         return GeneratedOperandTypeUnknown;
1905
1906     case DataFormatNone:
1907     case DataFormatJSCell:
1908     case DataFormatJS:
1909     case DataFormatJSBoolean:
1910     case DataFormatJSDouble:
1911         return GeneratedOperandJSValue;
1912
1913     case DataFormatJSInt32:
1914     case DataFormatInt32:
1915         return GeneratedOperandInteger;
1916
1917     default:
1918         RELEASE_ASSERT_NOT_REACHED();
1919         return GeneratedOperandTypeUnknown;
1920     }
1921 }
1922
1923 void SpeculativeJIT::compileValueToInt32(Node* node)
1924 {
1925     switch (node->child1().useKind()) {
1926 #if USE(JSVALUE64)
1927     case Int52RepUse: {
1928         SpeculateStrictInt52Operand op1(this, node->child1());
1929         GPRTemporary result(this, Reuse, op1);
1930         GPRReg op1GPR = op1.gpr();
1931         GPRReg resultGPR = result.gpr();
1932         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1933         int32Result(resultGPR, node, DataFormatInt32);
1934         return;
1935     }
1936 #endif // USE(JSVALUE64)
1937         
1938     case DoubleRepUse: {
1939         GPRTemporary result(this);
1940         SpeculateDoubleOperand op1(this, node->child1());
1941         FPRReg fpr = op1.fpr();
1942         GPRReg gpr = result.gpr();
1943         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1944         
1945         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1946         
1947         int32Result(gpr, node);
1948         return;
1949     }
1950     
1951     case NumberUse:
1952     case NotCellUse: {
1953         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1954         case GeneratedOperandInteger: {
1955             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1956             GPRTemporary result(this, Reuse, op1);
1957             m_jit.move(op1.gpr(), result.gpr());
1958             int32Result(result.gpr(), node, op1.format());
1959             return;
1960         }
1961         case GeneratedOperandJSValue: {
1962             GPRTemporary result(this);
1963 #if USE(JSVALUE64)
1964             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1965
1966             GPRReg gpr = op1.gpr();
1967             GPRReg resultGpr = result.gpr();
1968             FPRTemporary tempFpr(this);
1969             FPRReg fpr = tempFpr.fpr();
1970
1971             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1972             JITCompiler::JumpList converted;
1973
1974             if (node->child1().useKind() == NumberUse) {
1975                 DFG_TYPE_CHECK(
1976                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1977                     m_jit.branchTest64(
1978                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1979             } else {
1980                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1981                 
1982                 DFG_TYPE_CHECK(
1983                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1984                 
1985                 // It's not a cell: so true turns into 1 and all else turns into 0.
1986                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1987                 converted.append(m_jit.jump());
1988                 
1989                 isNumber.link(&m_jit);
1990             }
1991
1992             // First, if we get here we have a double encoded as a JSValue
1993             m_jit.move(gpr, resultGpr);
1994             unboxDouble(resultGpr, fpr);
1995
1996             silentSpillAllRegisters(resultGpr);
1997             callOperation(toInt32, resultGpr, fpr);
1998             silentFillAllRegisters(resultGpr);
1999
2000             converted.append(m_jit.jump());
2001
2002             isInteger.link(&m_jit);
2003             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2004
2005             converted.link(&m_jit);
2006 #else
2007             Node* childNode = node->child1().node();
2008             VirtualRegister virtualRegister = childNode->virtualRegister();
2009             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2010
2011             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2012
2013             GPRReg payloadGPR = op1.payloadGPR();
2014             GPRReg resultGpr = result.gpr();
2015         
2016             JITCompiler::JumpList converted;
2017
2018             if (info.registerFormat() == DataFormatJSInt32)
2019                 m_jit.move(payloadGPR, resultGpr);
2020             else {
2021                 GPRReg tagGPR = op1.tagGPR();
2022                 FPRTemporary tempFpr(this);
2023                 FPRReg fpr = tempFpr.fpr();
2024                 FPRTemporary scratch(this);
2025
2026                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2027
2028                 if (node->child1().useKind() == NumberUse) {
2029                     DFG_TYPE_CHECK(
2030                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2031                         m_jit.branch32(
2032                             MacroAssembler::AboveOrEqual, tagGPR,
2033                             TrustedImm32(JSValue::LowestTag)));
2034                 } else {
2035                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2036                     
2037                     DFG_TYPE_CHECK(
2038                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2039                         m_jit.branchIfCell(op1.jsValueRegs()));
2040                     
2041                     // It's not a cell: so true turns into 1 and all else turns into 0.
2042                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2043                     m_jit.move(TrustedImm32(0), resultGpr);
2044                     converted.append(m_jit.jump());
2045                     
2046                     isBoolean.link(&m_jit);
2047                     m_jit.move(payloadGPR, resultGpr);
2048                     converted.append(m_jit.jump());
2049                     
2050                     isNumber.link(&m_jit);
2051                 }
2052
2053                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2054
2055                 silentSpillAllRegisters(resultGpr);
2056                 callOperation(toInt32, resultGpr, fpr);
2057                 silentFillAllRegisters(resultGpr);
2058
2059                 converted.append(m_jit.jump());
2060
2061                 isInteger.link(&m_jit);
2062                 m_jit.move(payloadGPR, resultGpr);
2063
2064                 converted.link(&m_jit);
2065             }
2066 #endif
2067             int32Result(resultGpr, node);
2068             return;
2069         }
2070         case GeneratedOperandTypeUnknown:
2071             RELEASE_ASSERT(!m_compileOkay);
2072             return;
2073         }
2074         RELEASE_ASSERT_NOT_REACHED();
2075         return;
2076     }
2077     
2078     default:
2079         ASSERT(!m_compileOkay);
2080         return;
2081     }
2082 }
2083
2084 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2085 {
2086     if (doesOverflow(node->arithMode())) {
2087         // We know that this sometimes produces doubles. So produce a double every
2088         // time. This at least allows subsequent code to not have weird conditionals.
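              // For example, the bit pattern 0xFFFFFFFF is -1 as an int32 but 4294967295 as a
              // uint32; converting the signed value and then adding 2^32 for negative inputs
              // yields the correct unsigned result.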
2089             
2090         SpeculateInt32Operand op1(this, node->child1());
2091         FPRTemporary result(this);
2092             
2093         GPRReg inputGPR = op1.gpr();
2094         FPRReg outputFPR = result.fpr();
2095             
2096         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2097             
2098         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2099         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2100         positive.link(&m_jit);
2101             
2102         doubleResult(outputFPR, node);
2103         return;
2104     }
2105     
2106     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2107
2108     SpeculateInt32Operand op1(this, node->child1());
2109     GPRTemporary result(this);
2110
2111     m_jit.move(op1.gpr(), result.gpr());
2112
2113     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2114
2115     int32Result(result.gpr(), node, op1.format());
2116 }
2117
2118 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2119 {
2120     SpeculateDoubleOperand op1(this, node->child1());
2121     FPRTemporary scratch(this);
2122     GPRTemporary result(this);
2123     
2124     FPRReg valueFPR = op1.fpr();
2125     FPRReg scratchFPR = scratch.fpr();
2126     GPRReg resultGPR = result.gpr();
2127
2128     JITCompiler::JumpList failureCases;
2129     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2130     m_jit.branchConvertDoubleToInt32(
2131         valueFPR, resultGPR, failureCases, scratchFPR,
2132         shouldCheckNegativeZero(node->arithMode()));
2133     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2134
2135     int32Result(resultGPR, node);
2136 }
2137
2138 void SpeculativeJIT::compileDoubleRep(Node* node)
2139 {
2140     switch (node->child1().useKind()) {
2141     case RealNumberUse: {
2142         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2143         FPRTemporary result(this);
2144         
2145         JSValueRegs op1Regs = op1.jsValueRegs();
2146         FPRReg resultFPR = result.fpr();
2147         
2148 #if USE(JSVALUE64)
2149         GPRTemporary temp(this);
2150         GPRReg tempGPR = temp.gpr();
2151         m_jit.move(op1Regs.gpr(), tempGPR);
2152         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2153 #else
2154         FPRTemporary temp(this);
2155         FPRReg tempFPR = temp.fpr();
2156         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2157 #endif
2158         
2159         JITCompiler::Jump done = m_jit.branchDouble(
2160             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2161         
2162         DFG_TYPE_CHECK(
2163             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2164         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2165         
2166         done.link(&m_jit);
2167         
2168         doubleResult(resultFPR, node);
2169         return;
2170     }
2171     
2172     case NotCellUse:
2173     case NumberUse: {
2174         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2175
2176         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2177         if (isInt32Speculation(possibleTypes)) {
2178             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2179             FPRTemporary result(this);
2180             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2181             doubleResult(result.fpr(), node);
2182             return;
2183         }
2184
2185         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2186         FPRTemporary result(this);
2187
2188 #if USE(JSVALUE64)
2189         GPRTemporary temp(this);
2190
2191         GPRReg op1GPR = op1.gpr();
2192         GPRReg tempGPR = temp.gpr();
2193         FPRReg resultFPR = result.fpr();
2194         JITCompiler::JumpList done;
2195
2196         JITCompiler::Jump isInteger = m_jit.branch64(
2197             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2198
2199         if (node->child1().useKind() == NotCellUse) {
2200             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2201             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2202
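                 // ToNumber on these non-cells follows the usual JS rules: null and false become
                 // 0, true becomes 1, and undefined becomes NaN; the constants below are
                 // materialized inline instead of calling a runtime function.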
2203             static const double zero = 0;
2204             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2205
2206             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2207             done.append(isNull);
2208
2209             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2210                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2211
2212             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2213             static const double one = 1;
2214             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2215             done.append(m_jit.jump());
2216             done.append(isFalse);
2217
2218             isUndefined.link(&m_jit);
2219             static const double NaN = PNaN;
2220             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2221             done.append(m_jit.jump());
2222
2223             isNumber.link(&m_jit);
2224         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2225             typeCheck(
2226                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2227                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2228         }
2229     
2230         m_jit.move(op1GPR, tempGPR);
2231         unboxDouble(tempGPR, resultFPR);
2232         done.append(m_jit.jump());
2233     
2234         isInteger.link(&m_jit);
2235         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2236         done.link(&m_jit);
2237 #else // USE(JSVALUE64) -> this is the 32_64 case
2238         FPRTemporary temp(this);
2239     
2240         GPRReg op1TagGPR = op1.tagGPR();
2241         GPRReg op1PayloadGPR = op1.payloadGPR();
2242         FPRReg tempFPR = temp.fpr();
2243         FPRReg resultFPR = result.fpr();
2244         JITCompiler::JumpList done;
2245     
2246         JITCompiler::Jump isInteger = m_jit.branch32(
2247             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2248
2249         if (node->child1().useKind() == NotCellUse) {
2250             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2251             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2252
2253             static const double zero = 0;
2254             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2255
2256             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2257             done.append(isNull);
2258
2259             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2260
2261             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2262             static const double one = 1;
2263             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2264             done.append(m_jit.jump());
2265             done.append(isFalse);
2266
2267             isUndefined.link(&m_jit);
2268             static const double NaN = PNaN;
2269             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2270             done.append(m_jit.jump());
2271
2272             isNumber.link(&m_jit);
2273         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2274             typeCheck(
2275                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2276                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2277         }
2278
2279         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2280         done.append(m_jit.jump());
2281     
2282         isInteger.link(&m_jit);
2283         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2284         done.link(&m_jit);
2285 #endif // USE(JSVALUE64)
2286     
2287         doubleResult(resultFPR, node);
2288         return;
2289     }
2290         
2291 #if USE(JSVALUE64)
2292     case Int52RepUse: {
2293         SpeculateStrictInt52Operand value(this, node->child1());
2294         FPRTemporary result(this);
2295         
2296         GPRReg valueGPR = value.gpr();
2297         FPRReg resultFPR = result.fpr();
2298
2299         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2300         
2301         doubleResult(resultFPR, node);
2302         return;
2303     }
2304 #endif // USE(JSVALUE64)
2305         
2306     default:
2307         RELEASE_ASSERT_NOT_REACHED();
2308         return;
2309     }
2310 }
2311
2312 void SpeculativeJIT::compileValueRep(Node* node)
2313 {
2314     switch (node->child1().useKind()) {
2315     case DoubleRepUse: {
2316         SpeculateDoubleOperand value(this, node->child1());
2317         JSValueRegsTemporary result(this);
2318         
2319         FPRReg valueFPR = value.fpr();
2320         JSValueRegs resultRegs = result.regs();
2321         
2322         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2323         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2324         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2325         // local was purified.
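              // purifyNaN() rewrites any impure NaN bit pattern into the canonical pure NaN so
              // that the boxed result cannot be confused with a non-double (tagged) encoding.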
2326         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2327             m_jit.purifyNaN(valueFPR);
2328
2329         boxDouble(valueFPR, resultRegs);
2330         
2331         jsValueResult(resultRegs, node);
2332         return;
2333     }
2334         
2335 #if USE(JSVALUE64)
2336     case Int52RepUse: {
2337         SpeculateStrictInt52Operand value(this, node->child1());
2338         GPRTemporary result(this);
2339         
2340         GPRReg valueGPR = value.gpr();
2341         GPRReg resultGPR = result.gpr();
2342         
2343         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2344         
2345         jsValueResult(resultGPR, node);
2346         return;
2347     }
2348 #endif // USE(JSVALUE64)
2349         
2350     default:
2351         RELEASE_ASSERT_NOT_REACHED();
2352         return;
2353     }
2354 }
2355
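     // Clamps a double to [0, 255] for a clamped byte store: adding 0.5 makes the caller's
     // truncation round to nearest, values not greater than 0 after the adjustment (including
     // NaN, which fails the comparison) become 0, and values above 255 become 255.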
2356 static double clampDoubleToByte(double d)
2357 {
2358     d += 0.5;
2359     if (!(d > 0))
2360         d = 0;
2361     else if (d > 255)
2362         d = 255;
2363     return d;
2364 }
2365
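     // Clamps an int32 in place to [0, 255]: in-range values take the unsigned BelowOrEqual
     // branch, values above 255 (signed) are set to 255, and negative values fall through to
     // the xor and become 0.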
2366 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2367 {
2368     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2369     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2370     jit.xorPtr(result, result);
2371     MacroAssembler::Jump clamped = jit.jump();
2372     tooBig.link(&jit);
2373     jit.move(JITCompiler::TrustedImm32(255), result);
2374     clamped.link(&jit);
2375     inBounds.link(&jit);
2376 }
2377
2378 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2379 {
2380     // Unordered compare so we pick up NaN
2381     static const double zero = 0;
2382     static const double byteMax = 255;
2383     static const double half = 0.5;
2384     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2385     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2386     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2387     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2388     
2389     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2390     // FIXME: This should probably just use a floating point round!
2391     // https://bugs.webkit.org/show_bug.cgi?id=72054
2392     jit.addDouble(source, scratch);
2393     jit.truncateDoubleToInt32(scratch, result);
2394     MacroAssembler::Jump truncatedInt = jit.jump();
2395     
2396     tooSmall.link(&jit);
2397     jit.xorPtr(result, result);
2398     MacroAssembler::Jump zeroed = jit.jump();
2399     
2400     tooBig.link(&jit);
2401     jit.move(JITCompiler::TrustedImm32(255), result);
2402     
2403     truncatedInt.link(&jit);
2404     zeroed.link(&jit);
2405
2406 }
2407
2408 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2409 {
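         // PutByValAlias is only emitted for an access that aliases an earlier, already
         // bounds-checked access, so it needs no check (editor's summary). If the view and the
         // index are compile-time constants and the index is provably in bounds, the check folds
         // away too; otherwise compare against the constant length or the view's length field.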
2410     if (node->op() == PutByValAlias)
2411         return JITCompiler::Jump();
2412     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2413         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2414     if (view) {
2415         uint32_t length = view->length();
2416         Node* indexNode = m_jit.graph().child(node, 1).node();
2417         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2418             return JITCompiler::Jump();
2419         return m_jit.branch32(
2420             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2421     }
2422     return m_jit.branch32(
2423         MacroAssembler::AboveOrEqual, indexGPR,
2424         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2425 }
2426
2427 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2428 {
2429     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2430     if (!jump.isSet())
2431         return;
2432     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2433 }
2434
2435 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2436 {
2437     ASSERT(isInt(type));
2438     
2439     SpeculateCellOperand base(this, node->child1());
2440     SpeculateStrictInt32Operand property(this, node->child2());
2441     StorageOperand storage(this, node->child3());
2442
2443     GPRReg baseReg = base.gpr();
2444     GPRReg propertyReg = property.gpr();
2445     GPRReg storageReg = storage.gpr();
2446
2447     GPRTemporary result(this);
2448     GPRReg resultReg = result.gpr();
2449
2450     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2451
2452     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2453     switch (elementSize(type)) {
2454     case 1:
2455         if (isSigned(type))
2456             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2457         else
2458             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2459         break;
2460     case 2:
2461         if (isSigned(type))
2462             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2463         else
2464             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2465         break;
2466     case 4:
2467         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2468         break;
2469     default:
2470         CRASH();
2471     }
2472     if (elementSize(type) < 4 || isSigned(type)) {
2473         int32Result(resultReg, node);
2474         return;
2475     }
2476     
2477     ASSERT(elementSize(type) == 4 && !isSigned(type));
2478     if (node->shouldSpeculateInt32()) {
2479         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2480         int32Result(resultReg, node);
2481         return;
2482     }
2483     
2484 #if USE(JSVALUE64)
2485     if (node->shouldSpeculateMachineInt()) {
2486         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2487         strictInt52Result(resultReg, node);
2488         return;
2489     }
2490 #endif
2491     
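         // The unsigned 32-bit value may not fit in an int32 and Int52 wasn't requested, so box
         // it as a double: convert as signed, then add 2^32 when the sign bit was set.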
2492     FPRTemporary fresult(this);
2493     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2494     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2495     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2496     positive.link(&m_jit);
2497     doubleResult(fresult.fpr(), node);
2498 }
2499
2500 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2501 {
2502     ASSERT(isInt(type));
2503     
2504     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2505     GPRReg storageReg = storage.gpr();
2506     
2507     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2508     
2509     GPRTemporary value;
2510     GPRReg valueGPR = InvalidGPRReg;
2511     
2512     if (valueUse->isConstant()) {
2513         JSValue jsValue = valueUse->asJSValue();
2514         if (!jsValue.isNumber()) {
2515             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2516             noResult(node);
2517             return;
2518         }
2519         double d = jsValue.asNumber();
2520         if (isClamped(type)) {
2521             ASSERT(elementSize(type) == 1);
2522             d = clampDoubleToByte(d);
2523         }
2524         GPRTemporary scratch(this);
2525         GPRReg scratchReg = scratch.gpr();
2526         m_jit.move(Imm32(toInt32(d)), scratchReg);
2527         value.adopt(scratch);
2528         valueGPR = scratchReg;
2529     } else {
2530         switch (valueUse.useKind()) {
2531         case Int32Use: {
2532             SpeculateInt32Operand valueOp(this, valueUse);
2533             GPRTemporary scratch(this);
2534             GPRReg scratchReg = scratch.gpr();
2535             m_jit.move(valueOp.gpr(), scratchReg);
2536             if (isClamped(type)) {
2537                 ASSERT(elementSize(type) == 1);
2538                 compileClampIntegerToByte(m_jit, scratchReg);
2539             }
2540             value.adopt(scratch);
2541             valueGPR = scratchReg;
2542             break;
2543         }
2544             
2545 #if USE(JSVALUE64)
2546         case Int52RepUse: {
2547             SpeculateStrictInt52Operand valueOp(this, valueUse);
2548             GPRTemporary scratch(this);
2549             GPRReg scratchReg = scratch.gpr();
2550             m_jit.move(valueOp.gpr(), scratchReg);
2551             if (isClamped(type)) {
2552                 ASSERT(elementSize(type) == 1);
2553                 MacroAssembler::Jump inBounds = m_jit.branch64(
2554                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2555                 MacroAssembler::Jump tooBig = m_jit.branch64(
2556                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2557                 m_jit.move(TrustedImm32(0), scratchReg);
2558                 MacroAssembler::Jump clamped = m_jit.jump();
2559                 tooBig.link(&m_jit);
2560                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2561                 clamped.link(&m_jit);
2562                 inBounds.link(&m_jit);
2563             }
2564             value.adopt(scratch);
2565             valueGPR = scratchReg;
2566             break;
2567         }
2568 #endif // USE(JSVALUE64)
2569             
2570         case DoubleRepUse: {
2571             if (isClamped(type)) {
2572                 ASSERT(elementSize(type) == 1);
2573                 SpeculateDoubleOperand valueOp(this, valueUse);
2574                 GPRTemporary result(this);
2575                 FPRTemporary floatScratch(this);
2576                 FPRReg fpr = valueOp.fpr();
2577                 GPRReg gpr = result.gpr();
2578                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2579                 value.adopt(result);
2580                 valueGPR = gpr;
2581             } else {
2582                 SpeculateDoubleOperand valueOp(this, valueUse);
2583                 GPRTemporary result(this);
2584                 FPRReg fpr = valueOp.fpr();
2585                 GPRReg gpr = result.gpr();
2586                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2587                 m_jit.xorPtr(gpr, gpr);
2588                 MacroAssembler::Jump fixed = m_jit.jump();
2589                 notNaN.link(&m_jit);
2590                 
2591                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2592                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2593                 
2594                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2595                 
2596                 fixed.link(&m_jit);
2597                 value.adopt(result);
2598                 valueGPR = gpr;
2599             }
2600             break;
2601         }
2602             
2603         default:
2604             RELEASE_ASSERT_NOT_REACHED();
2605             break;
2606         }
2607     }
2608     
2609     ASSERT_UNUSED(valueGPR, valueGPR != property);
2610     ASSERT(valueGPR != base);
2611     ASSERT(valueGPR != storageReg);
2612     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2613     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2614         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2615         outOfBounds = MacroAssembler::Jump();
2616     }
2617
2618     switch (elementSize(type)) {
2619     case 1:
2620         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2621         break;
2622     case 2:
2623         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2624         break;
2625     case 4:
2626         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2627         break;
2628     default:
2629         CRASH();
2630     }
2631     if (outOfBounds.isSet())
2632         outOfBounds.link(&m_jit);
2633     noResult(node);
2634 }
2635
2636 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2637 {
2638     ASSERT(isFloat(type));
2639     
2640     SpeculateCellOperand base(this, node->child1());
2641     SpeculateStrictInt32Operand property(this, node->child2());
2642     StorageOperand storage(this, node->child3());
2643
2644     GPRReg baseReg = base.gpr();
2645     GPRReg propertyReg = property.gpr();
2646     GPRReg storageReg = storage.gpr();
2647
2648     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2649
2650     FPRTemporary result(this);
2651     FPRReg resultReg = result.fpr();
2652     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2653     switch (elementSize(type)) {
2654     case 4:
2655         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2656         m_jit.convertFloatToDouble(resultReg, resultReg);
2657         break;
2658     case 8: {
2659         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2660         break;
2661     }
2662     default:
2663         RELEASE_ASSERT_NOT_REACHED();
2664     }
2665     
2666     doubleResult(resultReg, node);
2667 }
2668
2669 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2670 {
2671     ASSERT(isFloat(type));
2672     
2673     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2674     GPRReg storageReg = storage.gpr();
2675     
2676     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2677     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2678
2679     SpeculateDoubleOperand valueOp(this, valueUse);
2680     FPRTemporary scratch(this);
2681     FPRReg valueFPR = valueOp.fpr();
2682     FPRReg scratchFPR = scratch.fpr();
2683
2684     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2685     
2686     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2687     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2688         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2689         outOfBounds = MacroAssembler::Jump();
2690     }
2691     
2692     switch (elementSize(type)) {
2693     case 4: {
2694         m_jit.moveDouble(valueFPR, scratchFPR);
2695         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2696         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2697         break;
2698     }
2699     case 8:
2700         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2701         break;
2702     default:
2703         RELEASE_ASSERT_NOT_REACHED();
2704     }
2705     if (outOfBounds.isSet())
2706         outOfBounds.link(&m_jit);
2707     noResult(node);
2708 }
2709
2710 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2711 {
2712     // Check that prototype is an object.
2713     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2714     
2715     // Initialize scratchReg with the value being checked.
2716     m_jit.move(valueReg, scratchReg);
2717     
2718     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
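         // The loop exits either when scratchReg equals prototypeReg (result: true) or when the
         // loaded prototype is no longer a cell, i.e. we reached null at the end of the chain
         // (result: false).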
2719     MacroAssembler::Label loop(&m_jit);
2720     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2721     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2722     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2723 #if USE(JSVALUE64)
2724     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2725 #else
2726     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2727 #endif
2728     
2729     // No match - result is false.
2730 #if USE(JSVALUE64)
2731     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2732 #else
2733     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2734 #endif
2735     MacroAssembler::Jump putResult = m_jit.jump();
2736     
2737     isInstance.link(&m_jit);
2738 #if USE(JSVALUE64)
2739     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2740 #else
2741     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2742 #endif
2743     
2744     putResult.link(&m_jit);
2745 }
2746
2747 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2748 {
2749     SpeculateCellOperand base(this, node->child1());
2750
2751     GPRReg baseGPR = base.gpr();
2752
2753     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2754
2755     noResult(node);
2756 }
2757
2758 void SpeculativeJIT::compileInstanceOf(Node* node)
2759 {
2760     if (node->child1().useKind() == UntypedUse) {
2761         // It might not be a cell. Speculate less aggressively.
2762         // Or: it might only be used once (i.e. by us), so we get zero benefit
2763         // from speculating any more aggressively than we absolutely need to.
2764         
2765         JSValueOperand value(this, node->child1());
2766         SpeculateCellOperand prototype(this, node->child2());
2767         GPRTemporary scratch(this);
2768         GPRTemporary scratch2(this);
2769         
2770         GPRReg prototypeReg = prototype.gpr();
2771         GPRReg scratchReg = scratch.gpr();
2772         GPRReg scratch2Reg = scratch2.gpr();
2773         
2774         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2775         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2776         moveFalseTo(scratchReg);
2777
2778         MacroAssembler::Jump done = m_jit.jump();
2779         
2780         isCell.link(&m_jit);
2781         
2782         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2783         
2784         done.link(&m_jit);
2785
2786         blessedBooleanResult(scratchReg, node);
2787         return;
2788     }
2789     
2790     SpeculateCellOperand value(this, node->child1());
2791     SpeculateCellOperand prototype(this, node->child2());
2792     
2793     GPRTemporary scratch(this);
2794     GPRTemporary scratch2(this);
2795     
2796     GPRReg valueReg = value.gpr();
2797     GPRReg prototypeReg = prototype.gpr();
2798     GPRReg scratchReg = scratch.gpr();
2799     GPRReg scratch2Reg = scratch2.gpr();
2800     
2801     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2802
2803     blessedBooleanResult(scratchReg, node);
2804 }
2805
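     // Generic lowering for the untyped bitwise ops. If either operand is known not to be a
     // number, skip inline code generation entirely and call the slow-path operation; otherwise
     // let the snippet generator emit an inline fast path, with a fallback call wired onto its
     // slow-path jump list.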
2806 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2807 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2808 {
2809     Edge& leftChild = node->child1();
2810     Edge& rightChild = node->child2();
2811
2812     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2813         JSValueOperand left(this, leftChild);
2814         JSValueOperand right(this, rightChild);
2815         JSValueRegs leftRegs = left.jsValueRegs();
2816         JSValueRegs rightRegs = right.jsValueRegs();
2817 #if USE(JSVALUE64)
2818         GPRTemporary result(this);
2819         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2820 #else
2821         GPRTemporary resultTag(this);
2822         GPRTemporary resultPayload(this);
2823         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2824 #endif
2825         flushRegisters();
2826         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2827         m_jit.exceptionCheck();
2828
2829         jsValueResult(resultRegs, node);
2830         return;
2831     }
2832
2833     Optional<JSValueOperand> left;
2834     Optional<JSValueOperand> right;
2835
2836     JSValueRegs leftRegs;
2837     JSValueRegs rightRegs;
2838
2839 #if USE(JSVALUE64)
2840     GPRTemporary result(this);
2841     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2842     GPRTemporary scratch(this);
2843     GPRReg scratchGPR = scratch.gpr();
2844 #else
2845     GPRTemporary resultTag(this);
2846     GPRTemporary resultPayload(this);
2847     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2848     GPRReg scratchGPR = resultTag.gpr();
2849 #endif
2850
2851     SnippetOperand leftOperand;
2852     SnippetOperand rightOperand;
2853
2854     // The snippet generator does not support both operands being constant. If the left
2855     // operand is already const, we'll ignore the right operand's constness.
2856     if (leftChild->isInt32Constant())
2857         leftOperand.setConstInt32(leftChild->asInt32());
2858     else if (rightChild->isInt32Constant())
2859         rightOperand.setConstInt32(rightChild->asInt32());
2860
2861     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2862
2863     if (!leftOperand.isConst()) {
2864         left = JSValueOperand(this, leftChild);
2865         leftRegs = left->jsValueRegs();
2866     }
2867     if (!rightOperand.isConst()) {
2868         right = JSValueOperand(this, rightChild);
2869         rightRegs = right->jsValueRegs();
2870     }
2871
2872     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
2873     gen.generateFastPath(m_jit);
2874
2875     ASSERT(gen.didEmitFastPath());
2876     gen.endJumpList().append(m_jit.jump());
2877
2878     gen.slowPathJumpList().link(&m_jit);
2879     silentSpillAllRegisters(resultRegs);
2880
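         // The result registers hold nothing live on this slow path (the fast-path result is
         // irrelevant here), so a constant operand, which was never loaded on the fast path,
         // can be materialized into them just for the call below.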
2881     if (leftOperand.isConst()) {
2882         leftRegs = resultRegs;
2883         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
2884     } else if (rightOperand.isConst()) {
2885         rightRegs = resultRegs;
2886         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
2887     }
2888
2889     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2890
2891     silentFillAllRegisters(resultRegs);
2892     m_jit.exceptionCheck();
2893
2894     gen.endJumpList().link(&m_jit);
2895     jsValueResult(resultRegs, node);
2896 }
2897
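     // BitAnd, BitOr and BitXor. Untyped operands take emitUntypedBitOp above; otherwise both
     // children are speculated Int32, with a small shortcut when either side is an int32
     // constant (these ops are commutative, so the constant can sit on either side).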
2898 void SpeculativeJIT::compileBitwiseOp(Node* node)
2899 {
2900     NodeType op = node->op();
2901     Edge& leftChild = node->child1();
2902     Edge& rightChild = node->child2();
2903
2904     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
2905         switch (op) {
2906         case BitAnd:
2907             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
2908             return;
2909         case BitOr:
2910             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
2911             return;
2912         case BitXor:
2913             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
2914             return;
2915         default:
2916             RELEASE_ASSERT_NOT_REACHED();
2917         }
2918     }
2919
2920     if (leftChild->isInt32Constant()) {
2921         SpeculateInt32Operand op2(this, rightChild);
2922         GPRTemporary result(this, Reuse, op2);
2923
2924         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
2925
2926         int32Result(result.gpr(), node);
2927
2928     } else if (rightChild->isInt32Constant()) {
2929         SpeculateInt32Operand op1(this, leftChild);
2930         GPRTemporary result(this, Reuse, op1);
2931
2932         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
2933
2934         int32Result(result.gpr(), node);
2935
2936     } else {
2937         SpeculateInt32Operand op1(this, leftChild);
2938         SpeculateInt32Operand op2(this, rightChild);
2939         GPRTemporary result(this, Reuse, op1, op2);
2940         
2941         GPRReg reg1 = op1.gpr();
2942         GPRReg reg2 = op2.gpr();
2943         bitOp(op, reg1, reg2, result.gpr());
2944         
2945         int32Result(result.gpr(), node);
2946     }
2947 }
2948
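     // Untyped right shifts get their own helper because BitRShift (`>>`) and BitURShift
     // (`>>>`) share one generator, parameterized by signed vs. unsigned shift, and it needs
     // an extra FPR for operands that arrive as doubles.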
2949 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
2950 {
2951     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
2952         ? operationValueBitRShift : operationValueBitURShift;
2953     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
2954         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
2955
2956     Edge& leftChild = node->child1();
2957     Edge& rightChild = node->child2();
2958
2959     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2960         JSValueOperand left(this, leftChild);
2961         JSValueOperand right(this, rightChild);
2962         JSValueRegs leftRegs = left.jsValueRegs();
2963         JSValueRegs rightRegs = right.jsValueRegs();
2964 #if USE(JSVALUE64)
2965         GPRTemporary result(this);
2966         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2967 #else
2968         GPRTemporary resultTag(this);
2969         GPRTemporary resultPayload(this);
2970         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2971 #endif
2972         flushRegisters();
2973         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
2974         m_jit.exceptionCheck();
2975
2976         jsValueResult(resultRegs, node);
2977         return;
2978     }
2979
2980     Optional<JSValueOperand> left;
2981     Optional<JSValueOperand> right;
2982
2983     JSValueRegs leftRegs;
2984     JSValueRegs rightRegs;
2985
2986     FPRTemporary leftNumber(this);
2987     FPRReg leftFPR = leftNumber.fpr();
2988
2989 #if USE(JSVALUE64)
2990     GPRTemporary result(this);
2991     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2992     GPRTemporary scratch(this);
2993     GPRReg scratchGPR = scratch.gpr();
2994     FPRReg scratchFPR = InvalidFPRReg;
2995 #else
2996     GPRTemporary resultTag(this);
2997     GPRTemporary resultPayload(this);
2998     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2999     GPRReg scratchGPR = resultTag.gpr();
3000     FPRTemporary fprScratch(this);
3001     FPRReg scratchFPR = fprScratch.fpr();
3002 #endif
3003
3004     SnippetOperand leftOperand;
3005     SnippetOperand rightOperand;
3006
3007     // The snippet generator does not support both operands being constant. If the left
3008     // operand is already const, we'll ignore the right operand's constness.
3009     if (leftChild->isInt32Constant())
3010         leftOperand.setConstInt32(leftChild->asInt32());
3011     else if (rightChild->isInt32Constant())
3012         rightOperand.setConstInt32(rightChild->asInt32());
3013
3014     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3015
3016     if (!leftOperand.isConst()) {
3017         left = JSValueOperand(this, leftChild);
3018         leftRegs = left->jsValueRegs();
3019     }
3020     if (!rightOperand.isConst()) {
3021         right = JSValueOperand(this, rightChild);
3022         rightRegs = right->jsValueRegs();
3023     }
3024
3025     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3026         leftFPR, scratchGPR, scratchFPR, shiftType);
3027     gen.generateFastPath(m_jit);
3028
3029     ASSERT(gen.didEmitFastPath());
3030     gen.endJumpList().append(m_jit.jump());
3031
3032     gen.slowPathJumpList().link(&m_jit);
3033     silentSpillAllRegisters(resultRegs);
3034
3035     if (leftOperand.isConst()) {
3036         leftRegs = resultRegs;
3037         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3038     } else if (rightOperand.isConst()) {
3039         rightRegs = resultRegs;
3040         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3041     }
3042
3043     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3044
3045     silentFillAllRegisters(resultRegs);
3046     m_jit.exceptionCheck();
3047
3048     gen.endJumpList().link(&m_jit);
3049     jsValueResult(resultRegs, node);
3050     return;
3051 }
3052
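     // BitLShift, BitRShift and BitURShift. For Int32 operands with a constant shift amount,
     // the amount is masked with 0x1f, matching the ECMAScript rule that only the low five
     // bits of the shift count are used.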
3053 void SpeculativeJIT::compileShiftOp(Node* node)
3054 {
3055     NodeType op = node->op();
3056     Edge& leftChild = node->child1();
3057     Edge& rightChild = node->child2();
3058
3059     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3060         switch (op) {
3061         case BitLShift:
3062             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3063             return;
3064         case BitRShift:
3065         case BitURShift:
3066             emitUntypedRightShiftBitOp(node);
3067             return;
3068         default:
3069             RELEASE_ASSERT_NOT_REACHED();
3070         }
3071     }
3072
3073     if (rightChild->isInt32Constant()) {
3074         SpeculateInt32Operand op1(this, leftChild);
3075         GPRTemporary result(this, Reuse, op1);
3076
3077         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3078
3079         int32Result(result.gpr(), node);
3080     } else {
3081         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3082         SpeculateInt32Operand op1(this, leftChild);
3083         SpeculateInt32Operand op2(this, rightChild);
3084         GPRTemporary result(this, Reuse, op1);
3085
3086         GPRReg reg1 = op1.gpr();
3087         GPRReg reg2 = op2.gpr();
3088         shiftOp(op, reg1, reg2, result.gpr());
3089
3090         int32Result(result.gpr(), node);
3091     }
3092 }
3093
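     // ValueAdd: the generic `+`, which may mean string concatenation as well as numeric
     // addition. If either operand is known not to be a number, call operationValueAddNotNumber
     // directly; otherwise JITAddGenerator emits an inline fast path and operationValueAdd
     // handles whatever it bails out on.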
3094 void SpeculativeJIT::compileValueAdd(Node* node)
3095 {
3096     Edge& leftChild = node->child1();
3097     Edge& rightChild = node->child2();
3098
3099     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3100         JSValueOperand left(this, leftChild);
3101         JSValueOperand right(this, rightChild);
3102         JSValueRegs leftRegs = left.jsValueRegs();
3103         JSValueRegs rightRegs = right.jsValueRegs();
3104 #if USE(JSVALUE64)
3105         GPRTemporary result(this);
3106         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3107 #else
3108         GPRTemporary resultTag(this);
3109         GPRTemporary resultPayload(this);
3110         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3111 #endif
3112         flushRegisters();
3113         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3114         m_jit.exceptionCheck();
3115     
3116         jsValueResult(resultRegs, node);
3117         return;
3118     }
3119
3120     Optional<JSValueOperand> left;
3121     Optional<JSValueOperand> right;
3122
3123     JSValueRegs leftRegs;
3124     JSValueRegs rightRegs;
3125
3126     FPRTemporary leftNumber(this);
3127     FPRTemporary rightNumber(this);
3128     FPRReg leftFPR = leftNumber.fpr();
3129     FPRReg rightFPR = rightNumber.fpr();
3130
3131 #if USE(JSVALUE64)
3132     GPRTemporary result(this);
3133     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3134     GPRTemporary scratch(this);
3135     GPRReg scratchGPR = scratch.gpr();
3136     FPRReg scratchFPR = InvalidFPRReg;
3137 #else
3138     GPRTemporary resultTag(this);
3139     GPRTemporary resultPayload(this);
3140     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3141     GPRReg scratchGPR = resultTag.gpr();
3142     FPRTemporary fprScratch(this);
3143     FPRReg scratchFPR = fprScratch.fpr();
3144 #endif
3145
3146     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3147     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3148
3149     // The snippet generator does not support both operands being constant. If the left
3150     // operand is already const, we'll ignore the right operand's constness.
3151     if (leftChild->isInt32Constant())
3152         leftOperand.setConstInt32(leftChild->asInt32());
3153     else if (rightChild->isInt32Constant())
3154         rightOperand.setConstInt32(rightChild->asInt32());
3155
3156     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3157
3158     if (!leftOperand.isConst()) {
3159         left = JSValueOperand(this, leftChild);
3160         leftRegs = left->jsValueRegs();
3161     }
3162     if (!rightOperand.isConst()) {
3163         right = JSValueOperand(this, rightChild);
3164         rightRegs = right->jsValueRegs();
3165     }
3166
3167     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3168         leftFPR, rightFPR, scratchGPR, scratchFPR);
3169     gen.generateFastPath(m_jit);
3170
3171     ASSERT(gen.didEmitFastPath());
3172     gen.endJumpList().append(m_jit.jump());
3173
3174     gen.slowPathJumpList().link(&m_jit);
3175
3176     silentSpillAllRegisters(resultRegs);
3177
3178     if (leftOperand.isConst()) {
3179         leftRegs = resultRegs;
3180         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3181     } else if (rightOperand.isConst()) {
3182         rightRegs = resultRegs;
3183         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3184     }
3185
3186     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3187
3188     silentFillAllRegisters(resultRegs);
3189     m_jit.exceptionCheck();
3190
3191     gen.endJumpList().link(&m_jit);
3192     jsValueResult(resultRegs, node);
3193     return;
3194 }
3195
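     // InstanceOfCustom: instanceof where the constructor supplies its own Symbol.hasInstance.
     // There is no fast path at all; the unconditional jump below sends every execution to
     // operationInstanceOfCustom.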
3196 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3197 {
3198     // We could do something smarter here, but this case is currently super rare and,
3199     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
3200
3201     JSValueOperand value(this, node->child1());
3202     SpeculateCellOperand constructor(this, node->child2());
3203     JSValueOperand hasInstanceValue(this, node->child3());
3204     GPRTemporary result(this);
3205
3206     JSValueRegs valueRegs = value.jsValueRegs();
3207     GPRReg constructorGPR = constructor.gpr();
3208     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3209     GPRReg resultGPR = result.gpr();
3210
3211     MacroAssembler::Jump slowCase = m_jit.jump();
3212
3213     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3214
3215     unblessedBooleanResult(resultGPR, node);
3216 }
3217
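     // ArithAdd: addition whose operands are already known to be numeric. A rough sketch of
     // what each use kind compiles to:
     //   Int32Use:     add32, OSR-exiting on overflow (with a SpeculationRecovery so the
     //                 clobbered operand can be reconstructed in the exit path);
     //   Int52RepUse:  add64, with the overflow check elided when neither operand can
     //                 actually need more than 32 bits;
     //   DoubleRepUse: a plain addDouble.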
3218 void SpeculativeJIT::compileArithAdd(Node* node)
3219 {
3220     switch (node->binaryUseKind()) {
3221     case Int32Use: {
3222         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3223         
3224         if (node->child1()->isInt32Constant()) {
3225             int32_t imm1 = node->child1()->asInt32();
3226             SpeculateInt32Operand op2(this, node->child2());
3227             GPRTemporary result(this);
3228
3229             if (!shouldCheckOverflow(node->arithMode())) {
3230                 m_jit.move(op2.gpr(), result.gpr());
3231                 m_jit.add32(Imm32(imm1), result.gpr());
3232             } else
3233                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
3234
3235             int32Result(result.gpr(), node);
3236             return;
3237         }
3238         
3239         if (node->child2()->isInt32Constant()) {
3240             SpeculateInt32Operand op1(this, node->child1());
3241             int32_t imm2 = node->child2()->asInt32();
3242             GPRTemporary result(this);
3243                 
3244             if (!shouldCheckOverflow(node->arithMode())) {
3245                 m_jit.move(op1.gpr(), result.gpr());
3246                 m_jit.add32(Imm32(imm2), result.gpr());
3247             } else
3248                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3249
3250             int32Result(result.gpr(), node);
3251             return;
3252         }
3253                 
3254         SpeculateInt32Operand op1(this, node->child1());
3255         SpeculateInt32Operand op2(this, node->child2());
3256         GPRTemporary result(this, Reuse, op1, op2);
3257
3258         GPRReg gpr1 = op1.gpr();
3259         GPRReg gpr2 = op2.gpr();
3260         GPRReg gprResult = result.gpr();
3261
3262         if (!shouldCheckOverflow(node->arithMode())) {
3263             if (gpr1 == gprResult)
3264                 m_jit.add32(gpr2, gprResult);
3265             else {
3266                 m_jit.move(gpr2, gprResult);
3267                 m_jit.add32(gpr1, gprResult);
3268             }
3269         } else {
3270             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3271                 
3272             if (gpr1 == gprResult)
3273                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3274             else if (gpr2 == gprResult)
3275                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3276             else
3277                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3278         }
3279
3280         int32Result(gprResult, node);
3281         return;
3282     }
3283         
3284 #if USE(JSVALUE64)
3285     case Int52RepUse: {
3286         ASSERT(shouldCheckOverflow(node->arithMode()));
3287         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3288
3289         // Will we need an overflow check? If neither input can be Int52, then both fit in
3290         // 32 bits, and the sum of two int32s cannot overflow 52 bits, so no check is needed.
3291         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3292             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3293             SpeculateWhicheverInt52Operand op1(this, node->child1());
3294             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3295             GPRTemporary result(this, Reuse, op1);
3296             m_jit.move(op1.gpr(), result.gpr());
3297             m_jit.add64(op2.gpr(), result.gpr());
3298             int52Result(result.gpr(), node, op1.format());
3299             return;
3300         }
3301         
3302         SpeculateInt52Operand op1(this, node->child1());
3303         SpeculateInt52Operand op2(this, node->child2());
3304         GPRTemporary result(this);
3305         m_jit.move(op1.gpr(), result.gpr());
3306         speculationCheck(
3307             Int52Overflow, JSValueRegs(), 0,
3308             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3309         int52Result(result.gpr(), node);
3310         return;
3311     }
3312 #endif // USE(JSVALUE64)
3313     
3314     case DoubleRepUse: {
3315         SpeculateDoubleOperand op1(this, node->child1());
3316         SpeculateDoubleOperand op2(this, node->child2());
3317         FPRTemporary result(this, op1, op2);
3318
3319         FPRReg reg1 = op1.fpr();
3320         FPRReg reg2 = op2.fpr();
3321         m_jit.addDouble(reg1, reg2, result.fpr());
3322
3323         doubleResult(result.fpr(), node);
3324         return;
3325     }
3326         
3327     default:
3328         RELEASE_ASSERT_NOT_REACHED();
3329         break;
3330     }
3331 }
3332
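     // MakeRope: concatenates two or three operands that are already known to be strings by
     // building a rope, deferring the actual character copy. The third child is optional.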
3333 void SpeculativeJIT::compileMakeRope(Node* node)
3334 {
3335     ASSERT(node->child1().useKind() == KnownStringUse);
3336     ASSERT(node->child2().useKind() == KnownStringUse);
3337     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3338     
3339     SpeculateCellOperand op1(this, node->child1());
3340     SpeculateCellOperand op2(this, node->child2());
3341     SpeculateCellOperand op3(this, node->child3());