Unreviewed, rolling out r192876.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITSubGenerator.h"
43 #include "JSArrowFunction.h"
44 #include "JSCInlines.h"
45 #include "JSEnvironmentRecord.h"
46 #include "JSLexicalEnvironment.h"
47 #include "LinkBuffer.h"
48 #include "ScopedArguments.h"
49 #include "ScratchRegisterAllocator.h"
50 #include "WriteBarrierBuffer.h"
51 #include <wtf/MathExtras.h>
52
53 namespace JSC { namespace DFG {
54
55 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
56     : m_compileOkay(true)
57     , m_jit(jit)
58     , m_currentNode(0)
59     , m_lastGeneratedNode(LastNodeType)
60     , m_indexInBlock(0)
61     , m_generationInfo(m_jit.graph().frameRegisterCount())
62     , m_state(m_jit.graph())
63     , m_interpreter(m_jit.graph(), m_state)
64     , m_stream(&jit.jitCode()->variableEventStream)
65     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
66 {
67 }
68
69 SpeculativeJIT::~SpeculativeJIT()
70 {
71 }
72
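// Fast-path allocation of a JSArray together with its butterfly: the backing storage
// (at least BASE_VECTOR_LEN slots plus the IndexingHeader) is reserved first via
// emitAllocateBasicStorage, then the JSArray cell itself is allocated inline. Any
// failure is collected in slowCases and handled by CallArrayAllocatorSlowPathGenerator,
// which calls operationNewArrayWithSize and also hands back the storage pointer. For
// double arrays, slots past numElements are pre-filled with PNaN so the butterfly
// never holds garbage doubles.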
73 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
74 {
75     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
76     
77     GPRTemporary scratch(this);
78     GPRTemporary scratch2(this);
79     GPRReg scratchGPR = scratch.gpr();
80     GPRReg scratch2GPR = scratch2.gpr();
81     
82     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
83     
84     JITCompiler::JumpList slowCases;
85     
86     slowCases.append(
87         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
88     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
89     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
90     
91     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
92     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
93     
94     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
95 #if USE(JSVALUE64)
96         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
97         for (unsigned i = numElements; i < vectorLength; ++i)
98             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
99 #else
100         EncodedValueDescriptor value;
101         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
102         for (unsigned i = numElements; i < vectorLength; ++i) {
103             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
104             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
105         }
106 #endif
107     }
108     
109     // I want a slow path that also loads out the storage pointer, and that's
110     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
111     // of work for a very small piece of functionality. :-/
112     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
113         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
114         structure, numElements));
115 }
116
117 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
118 {
119     if (inlineCallFrame && !inlineCallFrame->isVarargs())
120         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
121     else {
122         VirtualRegister argumentCountRegister;
123         if (!inlineCallFrame)
124             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
125         else
126             argumentCountRegister = inlineCallFrame->argumentCountRegister;
127         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
128         if (!includeThis)
129             m_jit.sub32(TrustedImm32(1), lengthGPR);
130     }
131 }
132
133 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
134 {
135     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
136 }
137
138 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
139 {
140     if (origin.inlineCallFrame) {
141         if (origin.inlineCallFrame->isClosureCall) {
142             m_jit.loadPtr(
143                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
144                 calleeGPR);
145         } else {
146             m_jit.move(
147                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
148                 calleeGPR);
149         }
150     } else
151         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
152 }
153
154 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
155 {
156     m_jit.addPtr(
157         TrustedImm32(
158             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
159         GPRInfo::callFrameRegister, startGPR);
160 }
161
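// OSR exit fuzzing: bump the global g_numberOfOSRExitFuzzChecks counter and, once the
// count reaches the configured fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter point,
// return a jump that the caller wires up as a forced OSR exit. Returns an unset Jump
// when fuzzing is disabled.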
162 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
163 {
164     if (!doOSRExitFuzzing())
165         return MacroAssembler::Jump();
166     
167     MacroAssembler::Jump result;
168     
169     m_jit.pushToSave(GPRInfo::regT0);
170     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
171     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
172     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
173     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
174     unsigned at = Options::fireOSRExitFuzzAt();
175     if (at || atOrAfter) {
176         unsigned threshold;
177         MacroAssembler::RelationalCondition condition;
178         if (atOrAfter) {
179             threshold = atOrAfter;
180             condition = MacroAssembler::Below;
181         } else {
182             threshold = at;
183             condition = MacroAssembler::NotEqual;
184         }
185         MacroAssembler::Jump ok = m_jit.branch32(
186             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
187         m_jit.popToRestore(GPRInfo::regT0);
188         result = m_jit.jump();
189         ok.link(&m_jit);
190     }
191     m_jit.popToRestore(GPRInfo::regT0);
192     
193     return result;
194 }
195
196 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
197 {
198     if (!m_compileOkay)
199         return;
200     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
201     if (fuzzJump.isSet()) {
202         JITCompiler::JumpList jumpsToFail;
203         jumpsToFail.append(fuzzJump);
204         jumpsToFail.append(jumpToFail);
205         m_jit.appendExitInfo(jumpsToFail);
206     } else
207         m_jit.appendExitInfo(jumpToFail);
208     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
209 }
210
211 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
212 {
213     if (!m_compileOkay)
214         return;
215     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
216     if (fuzzJump.isSet()) {
217         JITCompiler::JumpList myJumpsToFail;
218         myJumpsToFail.append(jumpsToFail);
219         myJumpsToFail.append(fuzzJump);
220         m_jit.appendExitInfo(myJumpsToFail);
221     } else
222         m_jit.appendExitInfo(jumpsToFail);
223     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
224 }
225
226 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
227 {
228     if (!m_compileOkay)
229         return OSRExitJumpPlaceholder();
230     unsigned index = m_jit.jitCode()->osrExit.size();
231     m_jit.appendExitInfo();
232     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
233     return OSRExitJumpPlaceholder(index);
234 }
235
236 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
237 {
238     return speculationCheck(kind, jsValueSource, nodeUse.node());
239 }
240
241 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
242 {
243     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
244 }
245
246 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
247 {
248     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
249 }
250
251 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
252 {
253     if (!m_compileOkay)
254         return;
255     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
256     m_jit.appendExitInfo(jumpToFail);
257     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
258 }
259
260 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
261 {
262     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
263 }
264
265 void SpeculativeJIT::emitInvalidationPoint(Node* node)
266 {
267     if (!m_compileOkay)
268         return;
269     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
270     m_jit.jitCode()->appendOSRExit(OSRExit(
271         UncountableInvalidation, JSValueSource(),
272         m_jit.graph().methodOfGettingAValueProfileFor(node),
273         this, m_stream->size()));
274     info.m_replacementSource = m_jit.watchpointLabel();
275     ASSERT(info.m_replacementSource.isSet());
276     noResult(node);
277 }
278
279 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
280 {
281     if (!m_compileOkay)
282         return;
283     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
284     m_compileOkay = false;
285     if (verboseCompilationEnabled())
286         dataLog("Bailing compilation.\n");
287 }
288
289 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
290 {
291     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
292 }
293
294 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
295 {
296     ASSERT(needsTypeCheck(edge, typesPassedThrough));
297     m_interpreter.filter(edge, typesPassedThrough);
298     speculationCheck(BadType, source, edge.node(), jumpToFail);
299 }
300
301 RegisterSet SpeculativeJIT::usedRegisters()
302 {
303     RegisterSet result;
304     
305     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
306         GPRReg gpr = GPRInfo::toRegister(i);
307         if (m_gprs.isInUse(gpr))
308             result.set(gpr);
309     }
310     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
311         FPRReg fpr = FPRInfo::toRegister(i);
312         if (m_fprs.isInUse(fpr))
313             result.set(fpr);
314     }
315     
316     result.merge(RegisterSet::stubUnavailableRegisters());
317     
318     return result;
319 }
320
321 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
322 {
323     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
324 }
325
326 void SpeculativeJIT::runSlowPathGenerators()
327 {
328     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
329         m_slowPathGenerators[i]->generate(this);
330 }
331
332 // On Windows we need to wrap fmod; on other platforms we can call it directly.
333 // On ARMv7 we assert that all function pointers have the low bit set (they point to Thumb code).
334 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
335 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
336 {
337     return fmod(x, y);
338 }
339 #else
340 #define fmodAsDFGOperation fmod
341 #endif
342
343 void SpeculativeJIT::clearGenerationInfo()
344 {
345     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
346         m_generationInfo[i] = GenerationInfo();
347     m_gprs = RegisterBank<GPRInfo>();
348     m_fprs = RegisterBank<FPRInfo>();
349 }
350
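// The "silent spill" machinery preserves live registers across a call without
// perturbing the recorded register allocation state. silentSavePlanForGPR/FPR compute,
// per register, how the value should be written back to its spill slot (if it needs
// spilling at all) and how it should be reconstituted afterwards; constants are
// re-materialized rather than reloaded. silentSpill/silentFill then execute the plans.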
351 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
352 {
353     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
354     Node* node = info.node();
355     DataFormat registerFormat = info.registerFormat();
356     ASSERT(registerFormat != DataFormatNone);
357     ASSERT(registerFormat != DataFormatDouble);
358         
359     SilentSpillAction spillAction;
360     SilentFillAction fillAction;
361         
362     if (!info.needsSpill())
363         spillAction = DoNothingForSpill;
364     else {
365 #if USE(JSVALUE64)
366         ASSERT(info.gpr() == source);
367         if (registerFormat == DataFormatInt32)
368             spillAction = Store32Payload;
369         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
370             spillAction = StorePtr;
371         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
372             spillAction = Store64;
373         else {
374             ASSERT(registerFormat & DataFormatJS);
375             spillAction = Store64;
376         }
377 #elif USE(JSVALUE32_64)
378         if (registerFormat & DataFormatJS) {
379             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
380             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
381         } else {
382             ASSERT(info.gpr() == source);
383             spillAction = Store32Payload;
384         }
385 #endif
386     }
387         
388     if (registerFormat == DataFormatInt32) {
389         ASSERT(info.gpr() == source);
390         ASSERT(isJSInt32(info.registerFormat()));
391         if (node->hasConstant()) {
392             ASSERT(node->isInt32Constant());
393             fillAction = SetInt32Constant;
394         } else
395             fillAction = Load32Payload;
396     } else if (registerFormat == DataFormatBoolean) {
397 #if USE(JSVALUE64)
398         RELEASE_ASSERT_NOT_REACHED();
399 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
400         fillAction = DoNothingForFill;
401 #endif
402 #elif USE(JSVALUE32_64)
403         ASSERT(info.gpr() == source);
404         if (node->hasConstant()) {
405             ASSERT(node->isBooleanConstant());
406             fillAction = SetBooleanConstant;
407         } else
408             fillAction = Load32Payload;
409 #endif
410     } else if (registerFormat == DataFormatCell) {
411         ASSERT(info.gpr() == source);
412         if (node->hasConstant()) {
413             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
414             node->asCell(); // To get the assertion.
415             fillAction = SetCellConstant;
416         } else {
417 #if USE(JSVALUE64)
418             fillAction = LoadPtr;
419 #else
420             fillAction = Load32Payload;
421 #endif
422         }
423     } else if (registerFormat == DataFormatStorage) {
424         ASSERT(info.gpr() == source);
425         fillAction = LoadPtr;
426     } else if (registerFormat == DataFormatInt52) {
427         if (node->hasConstant())
428             fillAction = SetInt52Constant;
429         else if (info.spillFormat() == DataFormatInt52)
430             fillAction = Load64;
431         else if (info.spillFormat() == DataFormatStrictInt52)
432             fillAction = Load64ShiftInt52Left;
433         else if (info.spillFormat() == DataFormatNone)
434             fillAction = Load64;
435         else {
436             RELEASE_ASSERT_NOT_REACHED();
437 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
438             fillAction = Load64; // Make GCC happy.
439 #endif
440         }
441     } else if (registerFormat == DataFormatStrictInt52) {
442         if (node->hasConstant())
443             fillAction = SetStrictInt52Constant;
444         else if (info.spillFormat() == DataFormatInt52)
445             fillAction = Load64ShiftInt52Right;
446         else if (info.spillFormat() == DataFormatStrictInt52)
447             fillAction = Load64;
448         else if (info.spillFormat() == DataFormatNone)
449             fillAction = Load64;
450         else {
451             RELEASE_ASSERT_NOT_REACHED();
452 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
453             fillAction = Load64; // Make GCC happy.
454 #endif
455         }
456     } else {
457         ASSERT(registerFormat & DataFormatJS);
458 #if USE(JSVALUE64)
459         ASSERT(info.gpr() == source);
460         if (node->hasConstant()) {
461             if (node->isCellConstant())
462                 fillAction = SetTrustedJSConstant;
463             else
464                 fillAction = SetJSConstant;
465         } else if (info.spillFormat() == DataFormatInt32) {
466             ASSERT(registerFormat == DataFormatJSInt32);
467             fillAction = Load32PayloadBoxInt;
468         } else
469             fillAction = Load64;
470 #else
471         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
472         if (node->hasConstant())
473             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
474         else if (info.payloadGPR() == source)
475             fillAction = Load32Payload;
476         else { // Fill the Tag
477             switch (info.spillFormat()) {
478             case DataFormatInt32:
479                 ASSERT(registerFormat == DataFormatJSInt32);
480                 fillAction = SetInt32Tag;
481                 break;
482             case DataFormatCell:
483                 ASSERT(registerFormat == DataFormatJSCell);
484                 fillAction = SetCellTag;
485                 break;
486             case DataFormatBoolean:
487                 ASSERT(registerFormat == DataFormatJSBoolean);
488                 fillAction = SetBooleanTag;
489                 break;
490             default:
491                 fillAction = Load32Tag;
492                 break;
493             }
494         }
495 #endif
496     }
497         
498     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
499 }
500     
501 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
502 {
503     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
504     Node* node = info.node();
505     ASSERT(info.registerFormat() == DataFormatDouble);
506
507     SilentSpillAction spillAction;
508     SilentFillAction fillAction;
509         
510     if (!info.needsSpill())
511         spillAction = DoNothingForSpill;
512     else {
513         ASSERT(!node->hasConstant());
514         ASSERT(info.spillFormat() == DataFormatNone);
515         ASSERT(info.fpr() == source);
516         spillAction = StoreDouble;
517     }
518         
519 #if USE(JSVALUE64)
520     if (node->hasConstant()) {
521         node->asNumber(); // To get the assertion.
522         fillAction = SetDoubleConstant;
523     } else {
524         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
525         fillAction = LoadDouble;
526     }
527 #elif USE(JSVALUE32_64)
528     ASSERT(info.registerFormat() == DataFormatDouble);
529     if (node->hasConstant()) {
530         node->asNumber(); // To get the assertion.
531         fillAction = SetDoubleConstant;
532     } else
533         fillAction = LoadDouble;
534 #endif
535
536     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
537 }
538     
539 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
540 {
541     switch (plan.spillAction()) {
542     case DoNothingForSpill:
543         break;
544     case Store32Tag:
545         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
546         break;
547     case Store32Payload:
548         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
549         break;
550     case StorePtr:
551         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
552         break;
553 #if USE(JSVALUE64)
554     case Store64:
555         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
556         break;
557 #endif
558     case StoreDouble:
559         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
560         break;
561     default:
562         RELEASE_ASSERT_NOT_REACHED();
563     }
564 }
565     
566 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
567 {
568 #if USE(JSVALUE32_64)
569     UNUSED_PARAM(canTrample);
570 #endif
571     switch (plan.fillAction()) {
572     case DoNothingForFill:
573         break;
574     case SetInt32Constant:
575         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
576         break;
577 #if USE(JSVALUE64)
578     case SetInt52Constant:
579         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
580         break;
581     case SetStrictInt52Constant:
582         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
583         break;
584 #endif // USE(JSVALUE64)
585     case SetBooleanConstant:
586         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
587         break;
588     case SetCellConstant:
589         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
590         break;
591 #if USE(JSVALUE64)
592     case SetTrustedJSConstant:
593         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
594         break;
595     case SetJSConstant:
596         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
597         break;
598     case SetDoubleConstant:
599         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
600         m_jit.move64ToDouble(canTrample, plan.fpr());
601         break;
602     case Load32PayloadBoxInt:
603         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
604         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
605         break;
606     case Load32PayloadConvertToInt52:
607         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
608         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
609         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
610         break;
611     case Load32PayloadSignExtend:
612         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
613         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
614         break;
615 #else
616     case SetJSConstantTag:
617         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
618         break;
619     case SetJSConstantPayload:
620         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
621         break;
622     case SetInt32Tag:
623         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
624         break;
625     case SetCellTag:
626         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
627         break;
628     case SetBooleanTag:
629         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
630         break;
631     case SetDoubleConstant:
632         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
633         break;
634 #endif
635     case Load32Tag:
636         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
637         break;
638     case Load32Payload:
639         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
640         break;
641     case LoadPtr:
642         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
643         break;
644 #if USE(JSVALUE64)
645     case Load64:
646         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
647         break;
648     case Load64ShiftInt52Right:
649         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
650         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
651         break;
652     case Load64ShiftInt52Left:
653         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
654         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
655         break;
656 #endif
657     case LoadDouble:
658         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
659         break;
660     default:
661         RELEASE_ASSERT_NOT_REACHED();
662     }
663 }
664     
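// jumpSlowForUnwantedArrayMode expects the cell's indexing-type byte to already be in
// tempGPR (which it clobbers) and returns a jump, or jump list, taken when the indexing
// type does not match what the ArrayMode requires; when the mode insists on (or forbids)
// a JSArray, the IsArray bit is checked as well.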
665 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
666 {
667     switch (arrayMode.arrayClass()) {
668     case Array::OriginalArray: {
669         CRASH();
670 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
671         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
672         return result;
673 #endif
674     }
675         
676     case Array::Array:
677         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
678         return m_jit.branch32(
679             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
680         
681     case Array::NonArray:
682     case Array::OriginalNonArray:
683         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
684         return m_jit.branch32(
685             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
686         
687     case Array::PossiblyArray:
688         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
689         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
690     }
691     
692     RELEASE_ASSERT_NOT_REACHED();
693     return JITCompiler::Jump();
694 }
695
696 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
697 {
698     JITCompiler::JumpList result;
699     
700     switch (arrayMode.type()) {
701     case Array::Int32:
702         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
703
704     case Array::Double:
705         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
706
707     case Array::Contiguous:
708         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
709
710     case Array::Undecided:
711         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
712
713     case Array::ArrayStorage:
714     case Array::SlowPutArrayStorage: {
715         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
716         
717         if (arrayMode.isJSArray()) {
718             if (arrayMode.isSlowPut()) {
719                 result.append(
720                     m_jit.branchTest32(
721                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
722                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
723                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
724                 result.append(
725                     m_jit.branch32(
726                         MacroAssembler::Above, tempGPR,
727                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
728                 break;
729             }
730             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
731             result.append(
732                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
733             break;
734         }
735         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
736         if (arrayMode.isSlowPut()) {
737             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
738             result.append(
739                 m_jit.branch32(
740                     MacroAssembler::Above, tempGPR,
741                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
742             break;
743         }
744         result.append(
745             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
746         break;
747     }
748     default:
749         CRASH();
750         break;
751     }
752     
753     return result;
754 }
755
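// CheckArray only validates an already-decided ArrayMode; no conversion is performed
// here (that is Arrayify's job). Indexed shapes are checked via the indexing-type byte,
// DirectArguments / ScopedArguments / typed arrays via the cell's JSType, and a mismatch
// triggers an OSR exit. If the abstract state already proves the mode, no code is emitted.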
756 void SpeculativeJIT::checkArray(Node* node)
757 {
758     ASSERT(node->arrayMode().isSpecific());
759     ASSERT(!node->arrayMode().doesConversion());
760     
761     SpeculateCellOperand base(this, node->child1());
762     GPRReg baseReg = base.gpr();
763     
764     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
765         noResult(m_currentNode);
766         return;
767     }
768     
769     const ClassInfo* expectedClassInfo = 0;
770     
771     switch (node->arrayMode().type()) {
772     case Array::AnyTypedArray:
773     case Array::String:
774         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
775         break;
776     case Array::Int32:
777     case Array::Double:
778     case Array::Contiguous:
779     case Array::Undecided:
780     case Array::ArrayStorage:
781     case Array::SlowPutArrayStorage: {
782         GPRTemporary temp(this);
783         GPRReg tempGPR = temp.gpr();
784         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
785         speculationCheck(
786             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
787             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
788         
789         noResult(m_currentNode);
790         return;
791     }
792     case Array::DirectArguments:
793         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
794         noResult(m_currentNode);
795         return;
796     case Array::ScopedArguments:
797         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
798         noResult(m_currentNode);
799         return;
800     default:
801         speculateCellTypeWithoutTypeFiltering(
802             node->child1(), baseReg,
803             typeForTypedArrayType(node->arrayMode().typedArrayType()));
804         noResult(m_currentNode);
805         return;
806     }
807     
808     RELEASE_ASSERT(expectedClassInfo);
809     
810     GPRTemporary temp(this);
811     GPRTemporary temp2(this);
812     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
813     speculationCheck(
814         BadType, JSValueSource::unboxedCell(baseReg), node,
815         m_jit.branchPtr(
816             MacroAssembler::NotEqual,
817             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
818             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
819     
820     noResult(m_currentNode);
821 }
822
823 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
824 {
825     ASSERT(node->arrayMode().doesConversion());
826     
827     GPRTemporary temp(this);
828     GPRTemporary structure;
829     GPRReg tempGPR = temp.gpr();
830     GPRReg structureGPR = InvalidGPRReg;
831     
832     if (node->op() != ArrayifyToStructure) {
833         GPRTemporary realStructure(this);
834         structure.adopt(realStructure);
835         structureGPR = structure.gpr();
836     }
837         
838     // We can skip all that comes next if we already have array storage.
839     MacroAssembler::JumpList slowPath;
840     
841     if (node->op() == ArrayifyToStructure) {
842         slowPath.append(m_jit.branchWeakStructure(
843             JITCompiler::NotEqual,
844             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
845             node->structure()));
846     } else {
847         m_jit.load8(
848             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
849         
850         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
851     }
852     
853     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
854         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
855     
856     noResult(m_currentNode);
857 }
858
859 void SpeculativeJIT::arrayify(Node* node)
860 {
861     ASSERT(node->arrayMode().isSpecific());
862     
863     SpeculateCellOperand base(this, node->child1());
864     
865     if (!node->child2()) {
866         arrayify(node, base.gpr(), InvalidGPRReg);
867         return;
868     }
869     
870     SpeculateInt32Operand property(this, node->child2());
871     
872     arrayify(node, base.gpr(), property.gpr());
873 }
874
875 GPRReg SpeculativeJIT::fillStorage(Edge edge)
876 {
877     VirtualRegister virtualRegister = edge->virtualRegister();
878     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
879     
880     switch (info.registerFormat()) {
881     case DataFormatNone: {
882         if (info.spillFormat() == DataFormatStorage) {
883             GPRReg gpr = allocate();
884             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
885             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
886             info.fillStorage(*m_stream, gpr);
887             return gpr;
888         }
889         
890         // Must be a cell; fill it as a cell and then return the pointer.
891         return fillSpeculateCell(edge);
892     }
893         
894     case DataFormatStorage: {
895         GPRReg gpr = info.gpr();
896         m_gprs.lock(gpr);
897         return gpr;
898     }
899         
900     default:
901         return fillSpeculateCell(edge);
902     }
903 }
904
905 void SpeculativeJIT::useChildren(Node* node)
906 {
907     if (node->flags() & NodeHasVarArgs) {
908         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
909             if (!!m_jit.graph().m_varArgChildren[childIdx])
910                 use(m_jit.graph().m_varArgChildren[childIdx]);
911         }
912     } else {
913         Edge child1 = node->child1();
914         if (!child1) {
915             ASSERT(!node->child2() && !node->child3());
916             return;
917         }
918         use(child1);
919         
920         Edge child2 = node->child2();
921         if (!child2) {
922             ASSERT(!node->child3());
923             return;
924         }
925         use(child2);
926         
927         Edge child3 = node->child3();
928         if (!child3)
929             return;
930         use(child3);
931     }
932 }
933
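// For "in" with a constant, atomic string key we emit a patchable jump plus a
// StructureStubInfo so the property check can later be repatched in place after the
// slow-path call to operationInOptimize; every other key goes straight to
// operationGenericIn.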
934 void SpeculativeJIT::compileIn(Node* node)
935 {
936     SpeculateCellOperand base(this, node->child2());
937     GPRReg baseGPR = base.gpr();
938     
939     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
940         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
941             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
942             
943             GPRTemporary result(this);
944             GPRReg resultGPR = result.gpr();
945
946             use(node->child1());
947             
948             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
949             MacroAssembler::Label done = m_jit.label();
950             
951             // Since this block is executed only when string->tryGetValueImpl() returns an atomic StringImpl,
952             // we can safely cast it to const AtomicStringImpl*.
953             auto slowPath = slowPathCall(
954                 jump.m_jump, this, operationInOptimize,
955                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
956                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
957             
958             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
959             stubInfo->codeOrigin = node->origin.semantic;
960             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
961             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
962 #if USE(JSVALUE32_64)
963             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
964             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
965 #endif
966             stubInfo->patch.usedRegisters = usedRegisters();
967
968             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
969             addSlowPathGenerator(WTF::move(slowPath));
970
971             base.use();
972
973             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
974             return;
975         }
976     }
977
978     JSValueOperand key(this, node->child1());
979     JSValueRegs regs = key.jsValueRegs();
980         
981     GPRFlushedCallResult result(this);
982     GPRReg resultGPR = result.gpr();
983         
984     base.use();
985     key.use();
986         
987     flushRegisters();
988     callOperation(
989         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
990         baseGPR, regs);
991     m_jit.exceptionCheck();
992     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
993 }
994
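// If the compare's only use is the Branch that immediately follows it, fuse the two:
// emit a compare-and-branch, then advance m_indexInBlock/m_currentNode past the Branch
// and return true so the caller skips it. Otherwise emit a boolean-producing compare
// and return false.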
995 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
996 {
997     unsigned branchIndexInBlock = detectPeepHoleBranch();
998     if (branchIndexInBlock != UINT_MAX) {
999         Node* branchNode = m_block->at(branchIndexInBlock);
1000
1001         ASSERT(node->adjustedRefCount() == 1);
1002         
1003         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1004     
1005         m_indexInBlock = branchIndexInBlock;
1006         m_currentNode = branchNode;
1007         
1008         return true;
1009     }
1010     
1011     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1012     
1013     return false;
1014 }
1015
1016 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1017 {
1018     unsigned branchIndexInBlock = detectPeepHoleBranch();
1019     if (branchIndexInBlock != UINT_MAX) {
1020         Node* branchNode = m_block->at(branchIndexInBlock);
1021
1022         ASSERT(node->adjustedRefCount() == 1);
1023         
1024         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1025     
1026         m_indexInBlock = branchIndexInBlock;
1027         m_currentNode = branchNode;
1028         
1029         return true;
1030     }
1031     
1032     nonSpeculativeNonPeepholeStrictEq(node, invert);
1033     
1034     return false;
1035 }
1036
1037 static const char* dataFormatString(DataFormat format)
1038 {
1039     // These values correspond to the DataFormat enum.
1040     const char* strings[] = {
1041         "[  ]",
1042         "[ i]",
1043         "[ d]",
1044         "[ c]",
1045         "Err!",
1046         "Err!",
1047         "Err!",
1048         "Err!",
1049         "[J ]",
1050         "[Ji]",
1051         "[Jd]",
1052         "[Jc]",
1053         "Err!",
1054         "Err!",
1055         "Err!",
1056         "Err!",
1057     };
1058     return strings[format];
1059 }
1060
1061 void SpeculativeJIT::dump(const char* label)
1062 {
1063     if (label)
1064         dataLogF("<%s>\n", label);
1065
1066     dataLogF("  gprs:\n");
1067     m_gprs.dump();
1068     dataLogF("  fprs:\n");
1069     m_fprs.dump();
1070     dataLogF("  VirtualRegisters:\n");
1071     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1072         GenerationInfo& info = m_generationInfo[i];
1073         if (info.alive())
1074             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1075         else
1076             dataLogF("    % 3d:[__][__]", i);
1077         if (info.registerFormat() == DataFormatDouble)
1078             dataLogF(":fpr%d\n", info.fpr());
1079         else if (info.registerFormat() != DataFormatNone
1080 #if USE(JSVALUE32_64)
1081             && !(info.registerFormat() & DataFormatJS)
1082 #endif
1083             ) {
1084             ASSERT(info.gpr() != InvalidGPRReg);
1085             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1086         } else
1087             dataLogF("\n");
1088     }
1089     if (label)
1090         dataLogF("</%s>\n", label);
1091 }
1092
1093 GPRTemporary::GPRTemporary()
1094     : m_jit(0)
1095     , m_gpr(InvalidGPRReg)
1096 {
1097 }
1098
1099 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1100     : m_jit(jit)
1101     , m_gpr(InvalidGPRReg)
1102 {
1103     m_gpr = m_jit->allocate();
1104 }
1105
1106 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1107     : m_jit(jit)
1108     , m_gpr(InvalidGPRReg)
1109 {
1110     m_gpr = m_jit->allocate(specific);
1111 }
1112
1113 #if USE(JSVALUE32_64)
1114 GPRTemporary::GPRTemporary(
1115     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1116     : m_jit(jit)
1117     , m_gpr(InvalidGPRReg)
1118 {
1119     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1120         m_gpr = m_jit->reuse(op1.gpr(which));
1121     else
1122         m_gpr = m_jit->allocate();
1123 }
1124 #endif // USE(JSVALUE32_64)
1125
1126 JSValueRegsTemporary::JSValueRegsTemporary() { }
1127
1128 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1129 #if USE(JSVALUE64)
1130     : m_gpr(jit)
1131 #else
1132     : m_payloadGPR(jit)
1133     , m_tagGPR(jit)
1134 #endif
1135 {
1136 }
1137
1138 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1139
1140 JSValueRegs JSValueRegsTemporary::regs()
1141 {
1142 #if USE(JSVALUE64)
1143     return JSValueRegs(m_gpr.gpr());
1144 #else
1145     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1146 #endif
1147 }
1148
1149 void GPRTemporary::adopt(GPRTemporary& other)
1150 {
1151     ASSERT(!m_jit);
1152     ASSERT(m_gpr == InvalidGPRReg);
1153     ASSERT(other.m_jit);
1154     ASSERT(other.m_gpr != InvalidGPRReg);
1155     m_jit = other.m_jit;
1156     m_gpr = other.m_gpr;
1157     other.m_jit = 0;
1158     other.m_gpr = InvalidGPRReg;
1159 }
1160
1161 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1162     : m_jit(jit)
1163     , m_fpr(InvalidFPRReg)
1164 {
1165     m_fpr = m_jit->fprAllocate();
1166 }
1167
1168 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1169     : m_jit(jit)
1170     , m_fpr(InvalidFPRReg)
1171 {
1172     if (m_jit->canReuse(op1.node()))
1173         m_fpr = m_jit->reuse(op1.fpr());
1174     else
1175         m_fpr = m_jit->fprAllocate();
1176 }
1177
1178 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1179     : m_jit(jit)
1180     , m_fpr(InvalidFPRReg)
1181 {
1182     if (m_jit->canReuse(op1.node()))
1183         m_fpr = m_jit->reuse(op1.fpr());
1184     else if (m_jit->canReuse(op2.node()))
1185         m_fpr = m_jit->reuse(op2.fpr());
1186     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1187         m_fpr = m_jit->reuse(op1.fpr());
1188     else
1189         m_fpr = m_jit->fprAllocate();
1190 }
1191
1192 #if USE(JSVALUE32_64)
1193 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1194     : m_jit(jit)
1195     , m_fpr(InvalidFPRReg)
1196 {
1197     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1198         m_fpr = m_jit->reuse(op1.fpr());
1199     else
1200         m_fpr = m_jit->fprAllocate();
1201 }
1202 #endif
1203
1204 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1205 {
1206     BasicBlock* taken = branchNode->branchData()->taken.block;
1207     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1208     
1209     SpeculateDoubleOperand op1(this, node->child1());
1210     SpeculateDoubleOperand op2(this, node->child2());
1211     
1212     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1213     jump(notTaken);
1214 }
1215
1216 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1217 {
1218     BasicBlock* taken = branchNode->branchData()->taken.block;
1219     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1220
1221     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1222     
1223     if (taken == nextBlock()) {
1224         condition = MacroAssembler::NotEqual;
1225         BasicBlock* tmp = taken;
1226         taken = notTaken;
1227         notTaken = tmp;
1228     }
1229
1230     SpeculateCellOperand op1(this, node->child1());
1231     SpeculateCellOperand op2(this, node->child2());
1232     
1233     GPRReg op1GPR = op1.gpr();
1234     GPRReg op2GPR = op2.gpr();
1235     
1236     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1237         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1238             speculationCheck(
1239                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1240         }
1241         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1242             speculationCheck(
1243                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1244         }
1245     } else {
1246         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1247             speculationCheck(
1248                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1249                 m_jit.branchIfNotObject(op1GPR));
1250         }
1251         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1252             m_jit.branchTest8(
1253                 MacroAssembler::NonZero, 
1254                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1255                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1256
1257         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1258             speculationCheck(
1259                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1260                 m_jit.branchIfNotObject(op2GPR));
1261         }
1262         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1263             m_jit.branchTest8(
1264                 MacroAssembler::NonZero, 
1265                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1266                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1267     }
1268
1269     branchPtr(condition, op1GPR, op2GPR, taken);
1270     jump(notTaken);
1271 }
1272
1273 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1274 {
1275     BasicBlock* taken = branchNode->branchData()->taken.block;
1276     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1277
1278     // The branch instruction will branch to the taken block.
1279     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1280     if (taken == nextBlock()) {
1281         condition = JITCompiler::invert(condition);
1282         BasicBlock* tmp = taken;
1283         taken = notTaken;
1284         notTaken = tmp;
1285     }
1286
1287     if (node->child1()->isBooleanConstant()) {
1288         bool imm = node->child1()->asBoolean();
1289         SpeculateBooleanOperand op2(this, node->child2());
1290         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1291     } else if (node->child2()->isBooleanConstant()) {
1292         SpeculateBooleanOperand op1(this, node->child1());
1293         bool imm = node->child2()->asBoolean();
1294         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1295     } else {
1296         SpeculateBooleanOperand op1(this, node->child1());
1297         SpeculateBooleanOperand op2(this, node->child2());
1298         branch32(condition, op1.gpr(), op2.gpr(), taken);
1299     }
1300
1301     jump(notTaken);
1302 }
1303
1304 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1305 {
1306     BasicBlock* taken = branchNode->branchData()->taken.block;
1307     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1308
1309     // The branch instruction will branch to the taken block.
1310     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1311     if (taken == nextBlock()) {
1312         condition = JITCompiler::invert(condition);
1313         BasicBlock* tmp = taken;
1314         taken = notTaken;
1315         notTaken = tmp;
1316     }
1317
1318     if (node->child1()->isInt32Constant()) {
1319         int32_t imm = node->child1()->asInt32();
1320         SpeculateInt32Operand op2(this, node->child2());
1321         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1322     } else if (node->child2()->isInt32Constant()) {
1323         SpeculateInt32Operand op1(this, node->child1());
1324         int32_t imm = node->child2()->asInt32();
1325         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1326     } else {
1327         SpeculateInt32Operand op1(this, node->child1());
1328         SpeculateInt32Operand op2(this, node->child2());
1329         branch32(condition, op1.gpr(), op2.gpr(), taken);
1330     }
1331
1332     jump(notTaken);
1333 }
1334
1335 // Returns true if the compare is fused with a subsequent branch.
1336 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1337 {
1338     // Fused compare & branch.
1339     unsigned branchIndexInBlock = detectPeepHoleBranch();
1340     if (branchIndexInBlock != UINT_MAX) {
1341         Node* branchNode = m_block->at(branchIndexInBlock);
1342
1343         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1344         // so there can be no intervening nodes to also reference the compare.
1345         ASSERT(node->adjustedRefCount() == 1);
1346
1347         if (node->isBinaryUseKind(Int32Use))
1348             compilePeepHoleInt32Branch(node, branchNode, condition);
1349 #if USE(JSVALUE64)
1350         else if (node->isBinaryUseKind(Int52RepUse))
1351             compilePeepHoleInt52Branch(node, branchNode, condition);
1352 #endif // USE(JSVALUE64)
1353         else if (node->isBinaryUseKind(DoubleRepUse))
1354             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1355         else if (node->op() == CompareEq) {
1356             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1357                 // Use non-peephole comparison, for now.
1358                 return false;
1359             }
1360             if (node->isBinaryUseKind(BooleanUse))
1361                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1362             else if (node->isBinaryUseKind(SymbolUse))
1363                 compilePeepHoleSymbolEquality(node, branchNode);
1364             else if (node->isBinaryUseKind(ObjectUse))
1365                 compilePeepHoleObjectEquality(node, branchNode);
1366             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1367                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1368             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1369                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1370             else if (!needsTypeCheck(node->child1(), SpecOther))
1371                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1372             else if (!needsTypeCheck(node->child2(), SpecOther))
1373                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1374             else {
1375                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1376                 return true;
1377             }
1378         } else {
1379             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1380             return true;
1381         }
1382
1383         use(node->child1());
1384         use(node->child2());
1385         m_indexInBlock = branchIndexInBlock;
1386         m_currentNode = branchNode;
1387         return true;
1388     }
1389     return false;
1390 }
1391
1392 void SpeculativeJIT::noticeOSRBirth(Node* node)
1393 {
1394     if (!node->hasVirtualRegister())
1395         return;
1396     
1397     VirtualRegister virtualRegister = node->virtualRegister();
1398     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1399     
1400     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1401 }
1402
1403 void SpeculativeJIT::compileMovHint(Node* node)
1404 {
1405     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1406     
1407     Node* child = node->child1().node();
1408     noticeOSRBirth(child);
1409     
1410     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1411 }
1412
1413 void SpeculativeJIT::bail(AbortReason reason)
1414 {
1415     if (verboseCompilationEnabled())
1416         dataLog("Bailing compilation.\n");
1417     m_compileOkay = true;
1418     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1419     clearGenerationInfo();
1420 }
1421
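// Code generation walks the current block node by node, running the abstract
// interpreter (m_interpreter) in lock-step so later nodes see up-to-date type
// information, and recording variable events for OSR. Compilation of the block is
// abandoned via bail() if the abstract state becomes contradictory or a node fails
// to compile.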
1422 void SpeculativeJIT::compileCurrentBlock()
1423 {
1424     ASSERT(m_compileOkay);
1425     
1426     if (!m_block)
1427         return;
1428     
1429     ASSERT(m_block->isReachable);
1430     
1431     m_jit.blockHeads()[m_block->index] = m_jit.label();
1432
1433     if (!m_block->intersectionOfCFAHasVisited) {
1434         // Don't generate code for basic blocks that are unreachable according to CFA.
1435         // But to be sure that nobody has generated a jump to this block, drop in a
1436         // breakpoint here.
1437         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1438         return;
1439     }
1440
1441     m_stream->appendAndLog(VariableEvent::reset());
1442     
1443     m_jit.jitAssertHasValidCallFrame();
1444     m_jit.jitAssertTagsInPlace();
1445     m_jit.jitAssertArgumentCountSane();
1446
1447     m_state.reset();
1448     m_state.beginBasicBlock(m_block);
1449     
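         // Tell the OSR exit machinery, via the variable event stream, where each local that is
         // live at the head of this block lives and in what format.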
1450     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1451         int operand = m_block->variablesAtHead.operandForIndex(i);
1452         Node* node = m_block->variablesAtHead[i];
1453         if (!node)
1454             continue; // No need to record dead SetLocals.
1455         
1456         VariableAccessData* variable = node->variableAccessData();
1457         DataFormat format;
1458         if (!node->refCount())
1459             continue; // Likewise skip SetLocals that are dead (refCount of zero).
1460         format = dataFormatFor(variable->flushFormat());
1461         m_stream->appendAndLog(
1462             VariableEvent::setLocal(
1463                 VirtualRegister(operand),
1464                 variable->machineLocal(),
1465                 format));
1466     }
1467
1468     m_origin = NodeOrigin();
1469     
1470     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1471         m_currentNode = m_block->at(m_indexInBlock);
1472         
1473         // We may have hit a contradiction that the CFA was aware of but that the JIT
1474         // didn't cause directly.
1475         if (!m_state.isValid()) {
1476             bail(DFGBailedAtTopOfBlock);
1477             return;
1478         }
1479
1480         m_interpreter.startExecuting();
1481         m_jit.setForNode(m_currentNode);
1482         m_origin = m_currentNode->origin;
1483         if (validationEnabled())
1484             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1485         m_lastGeneratedNode = m_currentNode->op();
1486         
1487         ASSERT(m_currentNode->shouldGenerate());
1488         
1489         if (verboseCompilationEnabled()) {
1490             dataLogF(
1491                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1492                 (int)m_currentNode->index(),
1493                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1494             dataLog("\n");
1495         }
1496         
1497         m_jit.jitAssertNoException();
1498
1499         compile(m_currentNode);
1500         
1501         if (belongsInMinifiedGraph(m_currentNode->op()))
1502             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1503         
1504 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1505         m_jit.clearRegisterAllocationOffsets();
1506 #endif
1507         
1508         if (!m_compileOkay) {
1509             bail(DFGBailedAtEndOfNode);
1510             return;
1511         }
1512         
1513         // Make sure that the abstract state is rematerialized for the next node.
1514         m_interpreter.executeEffects(m_indexInBlock);
1515     }
1516     
1517     // Perform the most basic verification that children have been used correctly.
1518     if (!ASSERT_DISABLED) {
1519         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1520             GenerationInfo& info = m_generationInfo[index];
1521             RELEASE_ASSERT(!info.alive());
1522         }
1523     }
1524 }
1525
1526 // If we are making type predictions about our arguments then
1527 // we need to check that they are correct on function entry.
1528 void SpeculativeJIT::checkArgumentTypes()
1529 {
1530     ASSERT(!m_currentNode);
1531     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1532
1533     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1534         Node* node = m_jit.graph().m_arguments[i];
1535         if (!node) {
1536             // The argument is dead. We don't do any checks for such arguments.
1537             continue;
1538         }
1539         
1540         ASSERT(node->op() == SetArgument);
1541         ASSERT(node->shouldGenerate());
1542
1543         VariableAccessData* variableAccessData = node->variableAccessData();
1544         FlushFormat format = variableAccessData->flushFormat();
1545         
1546         if (format == FlushedJSValue)
1547             continue;
1548         
1549         VirtualRegister virtualRegister = variableAccessData->local();
1550
1551         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1552         
1553 #if USE(JSVALUE64)
1554         switch (format) {
1555         case FlushedInt32: {
1556             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1557             break;
1558         }
1559         case FlushedBoolean: {
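                 // Booleans are encoded as ValueFalse or ValueTrue, which differ only in the low
                 // bit, so xoring with ValueFalse must leave at most bit 0 set.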
1560             GPRTemporary temp(this);
1561             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1562             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1563             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1564             break;
1565         }
1566         case FlushedCell: {
1567             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1568             break;
1569         }
1570         default:
1571             RELEASE_ASSERT_NOT_REACHED();
1572             break;
1573         }
1574 #else
1575         switch (format) {
1576         case FlushedInt32: {
1577             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1578             break;
1579         }
1580         case FlushedBoolean: {
1581             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1582             break;
1583         }
1584         case FlushedCell: {
1585             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1586             break;
1587         }
1588         default:
1589             RELEASE_ASSERT_NOT_REACHED();
1590             break;
1591         }
1592 #endif
1593     }
1594
1595     m_origin = NodeOrigin();
1596 }
1597
1598 bool SpeculativeJIT::compile()
1599 {
1600     checkArgumentTypes();
1601     
1602     ASSERT(!m_currentNode);
1603     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1604         m_jit.setForBlockIndex(blockIndex);
1605         m_block = m_jit.graph().block(blockIndex);
1606         compileCurrentBlock();
1607     }
1608     linkBranches();
1609     return true;
1610 }
1611
1612 void SpeculativeJIT::createOSREntries()
1613 {
1614     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1615         BasicBlock* block = m_jit.graph().block(blockIndex);
1616         if (!block)
1617             continue;
1618         if (!block->isOSRTarget)
1619             continue;
1620         
1621         // Currently we don't have OSR entry trampolines. We could add them
1622         // here if need be.
1623         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1624     }
1625 }
1626
1627 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1628 {
1629     unsigned osrEntryIndex = 0;
1630     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1631         BasicBlock* block = m_jit.graph().block(blockIndex);
1632         if (!block)
1633             continue;
1634         if (!block->isOSRTarget)
1635             continue;
1636         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1637     }
1638     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1639     
1640     if (verboseCompilationEnabled()) {
1641         DumpContext dumpContext;
1642         dataLog("OSR Entries:\n");
1643         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1644             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1645         if (!dumpContext.isEmpty())
1646             dumpContext.dump(WTF::dataFile());
1647     }
1648 }
1649
1650 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1651 {
1652     Edge child3 = m_jit.graph().varArgChild(node, 2);
1653     Edge child4 = m_jit.graph().varArgChild(node, 3);
1654
1655     ArrayMode arrayMode = node->arrayMode();
1656     
1657     GPRReg baseReg = base.gpr();
1658     GPRReg propertyReg = property.gpr();
1659     
1660     SpeculateDoubleOperand value(this, child3);
1661
1662     FPRReg valueReg = value.fpr();
1663     
1664     DFG_TYPE_CHECK(
1665         JSValueRegs(), child3, SpecFullRealNumber,
1666         m_jit.branchDouble(
1667             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1668     
1669     if (!m_compileOkay)
1670         return;
1671     
1672     StorageOperand storage(this, child4);
1673     GPRReg storageReg = storage.gpr();
1674
1675     if (node->op() == PutByValAlias) {
1676         // Store the value to the array.
1677         GPRReg propertyReg = property.gpr();
1678         FPRReg valueReg = value.fpr();
1679         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1680         
1681         noResult(m_currentNode);
1682         return;
1683     }
1684     
1685     GPRTemporary temporary;
1686     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1687
1688     MacroAssembler::Jump slowCase;
1689     
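         // The in-bounds case just speculates on the public length. Otherwise, stores between the
         // public length and the vector length grow the array in place, and anything beyond the
         // vector length either goes to the slow path or fails the speculation if out-of-bounds
         // stores were not expected.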
1690     if (arrayMode.isInBounds()) {
1691         speculationCheck(
1692             OutOfBounds, JSValueRegs(), 0,
1693             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1694     } else {
1695         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1696         
1697         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1698         
1699         if (!arrayMode.isOutOfBounds())
1700             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1701         
1702         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1703         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1704         
1705         inBounds.link(&m_jit);
1706     }
1707     
1708     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1709
1710     base.use();
1711     property.use();
1712     value.use();
1713     storage.use();
1714     
1715     if (arrayMode.isOutOfBounds()) {
1716         addSlowPathGenerator(
1717             slowPathCall(
1718                 slowCase, this,
1719                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1720                 NoResult, baseReg, propertyReg, valueReg));
1721     }
1722
1723     noResult(m_currentNode, UseChildrenCalledExplicitly);
1724 }
1725
1726 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1727 {
1728     SpeculateCellOperand string(this, node->child1());
1729     SpeculateStrictInt32Operand index(this, node->child2());
1730     StorageOperand storage(this, node->child3());
1731
1732     GPRReg stringReg = string.gpr();
1733     GPRReg indexReg = index.gpr();
1734     GPRReg storageReg = storage.gpr();
1735     
1736     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1737
1738     // Unsigned comparison lets us filter out both negative indices and indices that are too large.
1739     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1740
1741     GPRTemporary scratch(this);
1742     GPRReg scratchReg = scratch.gpr();
1743
1744     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1745
1746     // Load the character into scratchReg
1747     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1748
1749     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1750     JITCompiler::Jump cont8Bit = m_jit.jump();
1751
1752     is16Bit.link(&m_jit);
1753
1754     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1755
1756     cont8Bit.link(&m_jit);
1757
1758     int32Result(scratchReg, m_currentNode);
1759 }
1760
1761 void SpeculativeJIT::compileGetByValOnString(Node* node)
1762 {
1763     SpeculateCellOperand base(this, node->child1());
1764     SpeculateStrictInt32Operand property(this, node->child2());
1765     StorageOperand storage(this, node->child3());
1766     GPRReg baseReg = base.gpr();
1767     GPRReg propertyReg = property.gpr();
1768     GPRReg storageReg = storage.gpr();
1769
1770     GPRTemporary scratch(this);
1771     GPRReg scratchReg = scratch.gpr();
1772 #if USE(JSVALUE32_64)
1773     GPRTemporary resultTag;
1774     GPRReg resultTagReg = InvalidGPRReg;
1775     if (node->arrayMode().isOutOfBounds()) {
1776         GPRTemporary realResultTag(this);
1777         resultTag.adopt(realResultTag);
1778         resultTagReg = resultTag.gpr();
1779     }
1780 #endif
1781
1782     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1783
1784     // Unsigned comparison lets us filter out both negative indices and indices that are too large.
1785     JITCompiler::Jump outOfBounds = m_jit.branch32(
1786         MacroAssembler::AboveOrEqual, propertyReg,
1787         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1788     if (node->arrayMode().isInBounds())
1789         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1790
1791     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1792
1793     // Load the character into scratchReg
1794     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1795
1796     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1797     JITCompiler::Jump cont8Bit = m_jit.jump();
1798
1799     is16Bit.link(&m_jit);
1800
1801     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1802
1803     JITCompiler::Jump bigCharacter =
1804         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1805
1806     // 8-bit string values don't need the isASCII check.
1807     cont8Bit.link(&m_jit);
1808
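         // Characters below 0x100 are looked up in the VM's single-character string cache
         // (scaled to pointer size); anything larger takes the bigCharacter slow path.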
1809     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1810     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1811     m_jit.loadPtr(scratchReg, scratchReg);
1812
1813     addSlowPathGenerator(
1814         slowPathCall(
1815             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1816
1817     if (node->arrayMode().isOutOfBounds()) {
1818 #if USE(JSVALUE32_64)
1819         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1820 #endif
1821
1822         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1823         if (globalObject->stringPrototypeChainIsSane()) {
1824             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1825             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1826             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1827             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1828             // indexed properties either.
1829             // https://bugs.webkit.org/show_bug.cgi?id=144668
1830             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1831             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1832             
1833 #if USE(JSVALUE64)
1834             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1835                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1836 #else
1837             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1838                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1839                 baseReg, propertyReg));
1840 #endif
1841         } else {
1842 #if USE(JSVALUE64)
1843             addSlowPathGenerator(
1844                 slowPathCall(
1845                     outOfBounds, this, operationGetByValStringInt,
1846                     scratchReg, baseReg, propertyReg));
1847 #else
1848             addSlowPathGenerator(
1849                 slowPathCall(
1850                     outOfBounds, this, operationGetByValStringInt,
1851                     resultTagReg, scratchReg, baseReg, propertyReg));
1852 #endif
1853         }
1854         
1855 #if USE(JSVALUE64)
1856         jsValueResult(scratchReg, m_currentNode);
1857 #else
1858         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1859 #endif
1860     } else
1861         cellResult(scratchReg, m_currentNode);
1862 }
1863
1864 void SpeculativeJIT::compileFromCharCode(Node* node)
1865 {
1866     SpeculateStrictInt32Operand property(this, node->child1());
1867     GPRReg propertyReg = property.gpr();
1868     GPRTemporary smallStrings(this);
1869     GPRTemporary scratch(this);
1870     GPRReg scratchReg = scratch.gpr();
1871     GPRReg smallStringsReg = smallStrings.gpr();
1872
1873     JITCompiler::JumpList slowCases;
1874     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1875     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1876     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1877
1878     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1879     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1880     cellResult(scratchReg, m_currentNode);
1881 }
1882
1883 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1884 {
1885     VirtualRegister virtualRegister = node->virtualRegister();
1886     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1887
1888     switch (info.registerFormat()) {
1889     case DataFormatStorage:
1890         RELEASE_ASSERT_NOT_REACHED();
1891
1892     case DataFormatBoolean:
1893     case DataFormatCell:
1894         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1895         return GeneratedOperandTypeUnknown;
1896
1897     case DataFormatNone:
1898     case DataFormatJSCell:
1899     case DataFormatJS:
1900     case DataFormatJSBoolean:
1901     case DataFormatJSDouble:
1902         return GeneratedOperandJSValue;
1903
1904     case DataFormatJSInt32:
1905     case DataFormatInt32:
1906         return GeneratedOperandInteger;
1907
1908     default:
1909         RELEASE_ASSERT_NOT_REACHED();
1910         return GeneratedOperandTypeUnknown;
1911     }
1912 }
1913
1914 void SpeculativeJIT::compileValueToInt32(Node* node)
1915 {
1916     switch (node->child1().useKind()) {
1917 #if USE(JSVALUE64)
1918     case Int52RepUse: {
1919         SpeculateStrictInt52Operand op1(this, node->child1());
1920         GPRTemporary result(this, Reuse, op1);
1921         GPRReg op1GPR = op1.gpr();
1922         GPRReg resultGPR = result.gpr();
1923         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1924         int32Result(resultGPR, node, DataFormatInt32);
1925         return;
1926     }
1927 #endif // USE(JSVALUE64)
1928         
1929     case DoubleRepUse: {
1930         GPRTemporary result(this);
1931         SpeculateDoubleOperand op1(this, node->child1());
1932         FPRReg fpr = op1.fpr();
1933         GPRReg gpr = result.gpr();
1934         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1935         
1936         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1937         
1938         int32Result(gpr, node);
1939         return;
1940     }
1941     
1942     case NumberUse:
1943     case NotCellUse: {
1944         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1945         case GeneratedOperandInteger: {
1946             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1947             GPRTemporary result(this, Reuse, op1);
1948             m_jit.move(op1.gpr(), result.gpr());
1949             int32Result(result.gpr(), node, op1.format());
1950             return;
1951         }
1952         case GeneratedOperandJSValue: {
1953             GPRTemporary result(this);
1954 #if USE(JSVALUE64)
1955             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1956
1957             GPRReg gpr = op1.gpr();
1958             GPRReg resultGpr = result.gpr();
1959             FPRTemporary tempFpr(this);
1960             FPRReg fpr = tempFpr.fpr();
1961
1962             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1963             JITCompiler::JumpList converted;
1964
1965             if (node->child1().useKind() == NumberUse) {
1966                 DFG_TYPE_CHECK(
1967                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1968                     m_jit.branchTest64(
1969                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1970             } else {
1971                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1972                 
1973                 DFG_TYPE_CHECK(
1974                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1975                 
1976                 // It's not a cell: so true turns into 1 and all else turns into 0.
1977                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1978                 converted.append(m_jit.jump());
1979                 
1980                 isNumber.link(&m_jit);
1981             }
1982
1983             // First, if we get here we have a double encoded as a JSValue
1984             m_jit.move(gpr, resultGpr);
1985             unboxDouble(resultGpr, fpr);
1986
1987             silentSpillAllRegisters(resultGpr);
1988             callOperation(toInt32, resultGpr, fpr);
1989             silentFillAllRegisters(resultGpr);
1990
1991             converted.append(m_jit.jump());
1992
1993             isInteger.link(&m_jit);
1994             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1995
1996             converted.link(&m_jit);
1997 #else
1998             Node* childNode = node->child1().node();
1999             VirtualRegister virtualRegister = childNode->virtualRegister();
2000             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2001
2002             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2003
2004             GPRReg payloadGPR = op1.payloadGPR();
2005             GPRReg resultGpr = result.gpr();
2006         
2007             JITCompiler::JumpList converted;
2008
2009             if (info.registerFormat() == DataFormatJSInt32)
2010                 m_jit.move(payloadGPR, resultGpr);
2011             else {
2012                 GPRReg tagGPR = op1.tagGPR();
2013                 FPRTemporary tempFpr(this);
2014                 FPRReg fpr = tempFpr.fpr();
2015                 FPRTemporary scratch(this);
2016
2017                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2018
2019                 if (node->child1().useKind() == NumberUse) {
2020                     DFG_TYPE_CHECK(
2021                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2022                         m_jit.branch32(
2023                             MacroAssembler::AboveOrEqual, tagGPR,
2024                             TrustedImm32(JSValue::LowestTag)));
2025                 } else {
2026                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2027                     
2028                     DFG_TYPE_CHECK(
2029                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2030                         m_jit.branchIfCell(op1.jsValueRegs()));
2031                     
2032                     // It's not a cell: so true turns into 1 and all else turns into 0.
2033                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2034                     m_jit.move(TrustedImm32(0), resultGpr);
2035                     converted.append(m_jit.jump());
2036                     
2037                     isBoolean.link(&m_jit);
2038                     m_jit.move(payloadGPR, resultGpr);
2039                     converted.append(m_jit.jump());
2040                     
2041                     isNumber.link(&m_jit);
2042                 }
2043
2044                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2045
2046                 silentSpillAllRegisters(resultGpr);
2047                 callOperation(toInt32, resultGpr, fpr);
2048                 silentFillAllRegisters(resultGpr);
2049
2050                 converted.append(m_jit.jump());
2051
2052                 isInteger.link(&m_jit);
2053                 m_jit.move(payloadGPR, resultGpr);
2054
2055                 converted.link(&m_jit);
2056             }
2057 #endif
2058             int32Result(resultGpr, node);
2059             return;
2060         }
2061         case GeneratedOperandTypeUnknown:
2062             RELEASE_ASSERT(!m_compileOkay);
2063             return;
2064         }
2065         RELEASE_ASSERT_NOT_REACHED();
2066         return;
2067     }
2068     
2069     default:
2070         ASSERT(!m_compileOkay);
2071         return;
2072     }
2073 }
2074
2075 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2076 {
2077     if (doesOverflow(node->arithMode())) {
2078         // We know that this sometimes produces doubles, so produce a double every
2079         // time. That at least spares subsequent code from weird conditionals.
2080             
2081         SpeculateInt32Operand op1(this, node->child1());
2082         FPRTemporary result(this);
2083             
2084         GPRReg inputGPR = op1.gpr();
2085         FPRReg outputFPR = result.fpr();
2086             
2087         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2088             
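             // If the int32 is negative, the original uint32 had its sign bit set, so add 2^32
             // to the converted double to recover the unsigned value.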
2089         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2090         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2091         positive.link(&m_jit);
2092             
2093         doubleResult(outputFPR, node);
2094         return;
2095     }
2096     
2097     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2098
2099     SpeculateInt32Operand op1(this, node->child1());
2100     GPRTemporary result(this);
2101
2102     m_jit.move(op1.gpr(), result.gpr());
2103
2104     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2105
2106     int32Result(result.gpr(), node, op1.format());
2107 }
2108
2109 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2110 {
2111     SpeculateDoubleOperand op1(this, node->child1());
2112     FPRTemporary scratch(this);
2113     GPRTemporary result(this);
2114     
2115     FPRReg valueFPR = op1.fpr();
2116     FPRReg scratchFPR = scratch.fpr();
2117     GPRReg resultGPR = result.gpr();
2118
2119     JITCompiler::JumpList failureCases;
2120     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2121     m_jit.branchConvertDoubleToInt32(
2122         valueFPR, resultGPR, failureCases, scratchFPR,
2123         shouldCheckNegativeZero(node->arithMode()));
2124     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2125
2126     int32Result(resultGPR, node);
2127 }
2128
2129 void SpeculativeJIT::compileDoubleRep(Node* node)
2130 {
2131     switch (node->child1().useKind()) {
2132     case RealNumberUse: {
2133         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2134         FPRTemporary result(this);
2135         
2136         JSValueRegs op1Regs = op1.jsValueRegs();
2137         FPRReg resultFPR = result.fpr();
2138         
2139 #if USE(JSVALUE64)
2140         GPRTemporary temp(this);
2141         GPRReg tempGPR = temp.gpr();
2142         m_jit.move(op1Regs.gpr(), tempGPR);
2143         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2144 #else
2145         FPRTemporary temp(this);
2146         FPRReg tempFPR = temp.fpr();
2147         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2148 #endif
2149         
2150         JITCompiler::Jump done = m_jit.branchDouble(
2151             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2152         
2153         DFG_TYPE_CHECK(
2154             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2155         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2156         
2157         done.link(&m_jit);
2158         
2159         doubleResult(resultFPR, node);
2160         return;
2161     }
2162     
2163     case NotCellUse:
2164     case NumberUse: {
2165         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2166
2167         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2168         if (isInt32Speculation(possibleTypes)) {
2169             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2170             FPRTemporary result(this);
2171             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2172             doubleResult(result.fpr(), node);
2173             return;
2174         }
2175
2176         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2177         FPRTemporary result(this);
2178
2179 #if USE(JSVALUE64)
2180         GPRTemporary temp(this);
2181
2182         GPRReg op1GPR = op1.gpr();
2183         GPRReg tempGPR = temp.gpr();
2184         FPRReg resultFPR = result.fpr();
2185         JITCompiler::JumpList done;
2186
2187         JITCompiler::Jump isInteger = m_jit.branch64(
2188             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2189
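         // For NotCellUse, convert the non-number cases inline: null and false become 0.0,
         // true becomes 1.0, and undefined becomes NaN; any cell fails the type check.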
2190         if (node->child1().useKind() == NotCellUse) {
2191             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2192             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2193
2194             static const double zero = 0;
2195             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2196
2197             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2198             done.append(isNull);
2199
2200             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2201                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2202
2203             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2204             static const double one = 1;
2205             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2206             done.append(m_jit.jump());
2207             done.append(isFalse);
2208
2209             isUndefined.link(&m_jit);
2210             static const double NaN = PNaN;
2211             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2212             done.append(m_jit.jump());
2213
2214             isNumber.link(&m_jit);
2215         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2216             typeCheck(
2217                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2218                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2219         }
2220     
2221         m_jit.move(op1GPR, tempGPR);
2222         unboxDouble(tempGPR, resultFPR);
2223         done.append(m_jit.jump());
2224     
2225         isInteger.link(&m_jit);
2226         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2227         done.link(&m_jit);
2228 #else // USE(JSVALUE64) -> this is the 32_64 case
2229         FPRTemporary temp(this);
2230     
2231         GPRReg op1TagGPR = op1.tagGPR();
2232         GPRReg op1PayloadGPR = op1.payloadGPR();
2233         FPRReg tempFPR = temp.fpr();
2234         FPRReg resultFPR = result.fpr();
2235         JITCompiler::JumpList done;
2236     
2237         JITCompiler::Jump isInteger = m_jit.branch32(
2238             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2239
2240         if (node->child1().useKind() == NotCellUse) {
2241             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2242             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2243
2244             static const double zero = 0;
2245             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2246
2247             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2248             done.append(isNull);
2249
2250             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2251
2252             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2253             static const double one = 1;
2254             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2255             done.append(m_jit.jump());
2256             done.append(isFalse);
2257
2258             isUndefined.link(&m_jit);
2259             static const double NaN = PNaN;
2260             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2261             done.append(m_jit.jump());
2262
2263             isNumber.link(&m_jit);
2264         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2265             typeCheck(
2266                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2267                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2268         }
2269
2270         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2271         done.append(m_jit.jump());
2272     
2273         isInteger.link(&m_jit);
2274         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2275         done.link(&m_jit);
2276 #endif // USE(JSVALUE64)
2277     
2278         doubleResult(resultFPR, node);
2279         return;
2280     }
2281         
2282 #if USE(JSVALUE64)
2283     case Int52RepUse: {
2284         SpeculateStrictInt52Operand value(this, node->child1());
2285         FPRTemporary result(this);
2286         
2287         GPRReg valueGPR = value.gpr();
2288         FPRReg resultFPR = result.fpr();
2289
2290         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2291         
2292         doubleResult(resultFPR, node);
2293         return;
2294     }
2295 #endif // USE(JSVALUE64)
2296         
2297     default:
2298         RELEASE_ASSERT_NOT_REACHED();
2299         return;
2300     }
2301 }
2302
2303 void SpeculativeJIT::compileValueRep(Node* node)
2304 {
2305     switch (node->child1().useKind()) {
2306     case DoubleRepUse: {
2307         SpeculateDoubleOperand value(this, node->child1());
2308         JSValueRegsTemporary result(this);
2309         
2310         FPRReg valueFPR = value.fpr();
2311         JSValueRegs resultRegs = result.regs();
2312         
2313         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2314         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2315         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2316         // local was purified.
2317         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2318             m_jit.purifyNaN(valueFPR);
2319
2320         boxDouble(valueFPR, resultRegs);
2321         
2322         jsValueResult(resultRegs, node);
2323         return;
2324     }
2325         
2326 #if USE(JSVALUE64)
2327     case Int52RepUse: {
2328         SpeculateStrictInt52Operand value(this, node->child1());
2329         GPRTemporary result(this);
2330         
2331         GPRReg valueGPR = value.gpr();
2332         GPRReg resultGPR = result.gpr();
2333         
2334         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2335         
2336         jsValueResult(resultGPR, node);
2337         return;
2338     }
2339 #endif // USE(JSVALUE64)
2340         
2341     default:
2342         RELEASE_ASSERT_NOT_REACHED();
2343         return;
2344     }
2345 }
2346
2347 static double clampDoubleToByte(double d)
2348 {
2349     d += 0.5;
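     // Note that !(d > 0) also catches NaN, which clamps to zero.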
2350     if (!(d > 0))
2351         d = 0;
2352     else if (d > 255)
2353         d = 255;
2354     return d;
2355 }
2356
2357 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2358 {
2359     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2360     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2361     jit.xorPtr(result, result);
2362     MacroAssembler::Jump clamped = jit.jump();
2363     tooBig.link(&jit);
2364     jit.move(JITCompiler::TrustedImm32(255), result);
2365     clamped.link(&jit);
2366     inBounds.link(&jit);
2367 }
2368
2369 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2370 {
2371     // Unordered compare so we pick up NaN
2372     static const double zero = 0;
2373     static const double byteMax = 255;
2374     static const double half = 0.5;
2375     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2376     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2377     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2378     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2379     
2380     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2381     // FIXME: This should probably just use a floating point round!
2382     // https://bugs.webkit.org/show_bug.cgi?id=72054
2383     jit.addDouble(source, scratch);
2384     jit.truncateDoubleToInt32(scratch, result);   
2385     MacroAssembler::Jump truncatedInt = jit.jump();
2386     
2387     tooSmall.link(&jit);
2388     jit.xorPtr(result, result);
2389     MacroAssembler::Jump zeroed = jit.jump();
2390     
2391     tooBig.link(&jit);
2392     jit.move(JITCompiler::TrustedImm32(255), result);
2393     
2394     truncatedInt.link(&jit);
2395     zeroed.link(&jit);
2396
2397 }
2398
2399 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2400 {
2401     if (node->op() == PutByValAlias)
2402         return JITCompiler::Jump();
2403     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2404         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
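     // If both the view and the index are compile-time known and the index is in bounds, no
     // check is needed at all; if only the view is known, check against its constant length.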
2405     if (view) {
2406         uint32_t length = view->length();
2407         Node* indexNode = m_jit.graph().child(node, 1).node();
2408         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2409             return JITCompiler::Jump();
2410         return m_jit.branch32(
2411             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2412     }
2413     return m_jit.branch32(
2414         MacroAssembler::AboveOrEqual, indexGPR,
2415         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2416 }
2417
2418 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2419 {
2420     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2421     if (!jump.isSet())
2422         return;
2423     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2424 }
2425
2426 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2427 {
2428     ASSERT(isInt(type));
2429     
2430     SpeculateCellOperand base(this, node->child1());
2431     SpeculateStrictInt32Operand property(this, node->child2());
2432     StorageOperand storage(this, node->child3());
2433
2434     GPRReg baseReg = base.gpr();
2435     GPRReg propertyReg = property.gpr();
2436     GPRReg storageReg = storage.gpr();
2437
2438     GPRTemporary result(this);
2439     GPRReg resultReg = result.gpr();
2440
2441     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2442
2443     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2444     switch (elementSize(type)) {
2445     case 1:
2446         if (isSigned(type))
2447             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2448         else
2449             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2450         break;
2451     case 2:
2452         if (isSigned(type))
2453             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2454         else
2455             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2456         break;
2457     case 4:
2458         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2459         break;
2460     default:
2461         CRASH();
2462     }
2463     if (elementSize(type) < 4 || isSigned(type)) {
2464         int32Result(resultReg, node);
2465         return;
2466     }
2467     
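     // A uint32 load may not fit in an int32. Either speculate that the value is non-negative,
     // widen to Int52 on 64-bit, or fall back to producing a double (adding 2^32 for values
     // with the sign bit set).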
2468     ASSERT(elementSize(type) == 4 && !isSigned(type));
2469     if (node->shouldSpeculateInt32()) {
2470         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2471         int32Result(resultReg, node);
2472         return;
2473     }
2474     
2475 #if USE(JSVALUE64)
2476     if (node->shouldSpeculateMachineInt()) {
2477         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2478         strictInt52Result(resultReg, node);
2479         return;
2480     }
2481 #endif
2482     
2483     FPRTemporary fresult(this);
2484     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2485     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2486     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2487     positive.link(&m_jit);
2488     doubleResult(fresult.fpr(), node);
2489 }
2490
2491 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2492 {
2493     ASSERT(isInt(type));
2494     
2495     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2496     GPRReg storageReg = storage.gpr();
2497     
2498     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2499     
2500     GPRTemporary value;
2501     GPRReg valueGPR = InvalidGPRReg;
2502     
2503     if (valueUse->isConstant()) {
2504         JSValue jsValue = valueUse->asJSValue();
2505         if (!jsValue.isNumber()) {
2506             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2507             noResult(node);
2508             return;
2509         }
2510         double d = jsValue.asNumber();
2511         if (isClamped(type)) {
2512             ASSERT(elementSize(type) == 1);
2513             d = clampDoubleToByte(d);
2514         }
2515         GPRTemporary scratch(this);
2516         GPRReg scratchReg = scratch.gpr();
2517         m_jit.move(Imm32(toInt32(d)), scratchReg);
2518         value.adopt(scratch);
2519         valueGPR = scratchReg;
2520     } else {
2521         switch (valueUse.useKind()) {
2522         case Int32Use: {
2523             SpeculateInt32Operand valueOp(this, valueUse);
2524             GPRTemporary scratch(this);
2525             GPRReg scratchReg = scratch.gpr();
2526             m_jit.move(valueOp.gpr(), scratchReg);
2527             if (isClamped(type)) {
2528                 ASSERT(elementSize(type) == 1);
2529                 compileClampIntegerToByte(m_jit, scratchReg);
2530             }
2531             value.adopt(scratch);
2532             valueGPR = scratchReg;
2533             break;
2534         }
2535             
2536 #if USE(JSVALUE64)
2537         case Int52RepUse: {
2538             SpeculateStrictInt52Operand valueOp(this, valueUse);
2539             GPRTemporary scratch(this);
2540             GPRReg scratchReg = scratch.gpr();
2541             m_jit.move(valueOp.gpr(), scratchReg);
2542             if (isClamped(type)) {
2543                 ASSERT(elementSize(type) == 1);
2544                 MacroAssembler::Jump inBounds = m_jit.branch64(
2545                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2546                 MacroAssembler::Jump tooBig = m_jit.branch64(
2547                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2548                 m_jit.move(TrustedImm32(0), scratchReg);
2549                 MacroAssembler::Jump clamped = m_jit.jump();
2550                 tooBig.link(&m_jit);
2551                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2552                 clamped.link(&m_jit);
2553                 inBounds.link(&m_jit);
2554             }
2555             value.adopt(scratch);
2556             valueGPR = scratchReg;
2557             break;
2558         }
2559 #endif // USE(JSVALUE64)
2560             
2561         case DoubleRepUse: {
2562             if (isClamped(type)) {
2563                 ASSERT(elementSize(type) == 1);
2564                 SpeculateDoubleOperand valueOp(this, valueUse);
2565                 GPRTemporary result(this);
2566                 FPRTemporary floatScratch(this);
2567                 FPRReg fpr = valueOp.fpr();
2568                 GPRReg gpr = result.gpr();
2569                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2570                 value.adopt(result);
2571                 valueGPR = gpr;
2572             } else {
2573                 SpeculateDoubleOperand valueOp(this, valueUse);
2574                 GPRTemporary result(this);
2575                 FPRReg fpr = valueOp.fpr();
2576                 GPRReg gpr = result.gpr();
2577                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2578                 m_jit.xorPtr(gpr, gpr);
2579                 MacroAssembler::Jump fixed = m_jit.jump();
2580                 notNaN.link(&m_jit);
2581                 
2582                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2583                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2584                 
2585                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2586                 
2587                 fixed.link(&m_jit);
2588                 value.adopt(result);
2589                 valueGPR = gpr;
2590             }
2591             break;
2592         }
2593             
2594         default:
2595             RELEASE_ASSERT_NOT_REACHED();
2596             break;
2597         }
2598     }
2599     
2600     ASSERT_UNUSED(valueGPR, valueGPR != property);
2601     ASSERT(valueGPR != base);
2602     ASSERT(valueGPR != storageReg);
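     // In-bounds array modes turn the bounds check into a speculation; otherwise the store is
     // simply skipped when the index is out of bounds.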
2603     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2604     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2605         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2606         outOfBounds = MacroAssembler::Jump();
2607     }
2608
2609     switch (elementSize(type)) {
2610     case 1:
2611         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2612         break;
2613     case 2:
2614         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2615         break;
2616     case 4:
2617         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2618         break;
2619     default:
2620         CRASH();
2621     }
2622     if (outOfBounds.isSet())
2623         outOfBounds.link(&m_jit);
2624     noResult(node);
2625 }
2626
2627 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2628 {
2629     ASSERT(isFloat(type));
2630     
2631     SpeculateCellOperand base(this, node->child1());
2632     SpeculateStrictInt32Operand property(this, node->child2());
2633     StorageOperand storage(this, node->child3());
2634
2635     GPRReg baseReg = base.gpr();
2636     GPRReg propertyReg = property.gpr();
2637     GPRReg storageReg = storage.gpr();
2638
2639     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2640
2641     FPRTemporary result(this);
2642     FPRReg resultReg = result.fpr();
2643     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2644     switch (elementSize(type)) {
2645     case 4:
2646         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2647         m_jit.convertFloatToDouble(resultReg, resultReg);
2648         break;
2649     case 8: {
2650         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2651         break;
2652     }
2653     default:
2654         RELEASE_ASSERT_NOT_REACHED();
2655     }
2656     
2657     doubleResult(resultReg, node);
2658 }
2659
2660 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2661 {
2662     ASSERT(isFloat(type));
2663     
2664     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2665     GPRReg storageReg = storage.gpr();
2666     
2667     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2668     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2669
2670     SpeculateDoubleOperand valueOp(this, valueUse);
2671     FPRTemporary scratch(this);
2672     FPRReg valueFPR = valueOp.fpr();
2673     FPRReg scratchFPR = scratch.fpr();
2674
2675     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2676     
2677     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2678     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2679         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2680         outOfBounds = MacroAssembler::Jump();
2681     }
2682     
2683     switch (elementSize(type)) {
2684     case 4: {
2685         m_jit.moveDouble(valueFPR, scratchFPR);
2686         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2687         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2688         break;
2689     }
2690     case 8:
2691         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2692         break;
2693     default:
2694         RELEASE_ASSERT_NOT_REACHED();
2695     }
2696     if (outOfBounds.isSet())
2697         outOfBounds.link(&m_jit);
2698     noResult(node);
2699 }
2700
2701 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2702 {
2703     // Check that prototype is an object.
2704     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2705     
2706     // Initialize scratchReg with the value being checked.
2707     m_jit.move(valueReg, scratchReg);
2708     
2709     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2710     MacroAssembler::Label loop(&m_jit);
2711     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2712     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2713     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
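     // The loop exits once the prototype slot no longer holds a cell: on 64-bit that is the
     // encoded null JSValue, on 32-bit the loaded payload is simply a null pointer.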
2714 #if USE(JSVALUE64)
2715     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2716 #else
2717     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2718 #endif
2719     
2720     // No match - result is false.
2721 #if USE(JSVALUE64)
2722     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2723 #else
2724     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2725 #endif
2726     MacroAssembler::Jump putResult = m_jit.jump();
2727     
2728     isInstance.link(&m_jit);
2729 #if USE(JSVALUE64)
2730     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2731 #else
2732     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2733 #endif
2734     
2735     putResult.link(&m_jit);
2736 }
2737
2738 void SpeculativeJIT::compileInstanceOf(Node* node)
2739 {
2740     if (node->child1().useKind() == UntypedUse) {
2741         // It might not be a cell. Speculate less aggressively.
2742         // Or: it might only be used once (i.e. by us), so we get zero benefit
2743         // from speculating any more aggressively than we absolutely need to.
2744         
2745         JSValueOperand value(this, node->child1());
2746         SpeculateCellOperand prototype(this, node->child2());
2747         GPRTemporary scratch(this);
2748         GPRTemporary scratch2(this);
2749         
2750         GPRReg prototypeReg = prototype.gpr();
2751         GPRReg scratchReg = scratch.gpr();
2752         GPRReg scratch2Reg = scratch2.gpr();
2753         
2754         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2755         GPRReg valueReg = value.jsValueRegs().payloadGPR();
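             // A non-cell can never be an instance of anything, so the result is false.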
2756         moveFalseTo(scratchReg);
2757
2758         MacroAssembler::Jump done = m_jit.jump();
2759         
2760         isCell.link(&m_jit);
2761         
2762         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2763         
2764         done.link(&m_jit);
2765
2766         blessedBooleanResult(scratchReg, node);
2767         return;
2768     }
2769     
2770     SpeculateCellOperand value(this, node->child1());
2771     SpeculateCellOperand prototype(this, node->child2());
2772     
2773     GPRTemporary scratch(this);
2774     GPRTemporary scratch2(this);
2775     
2776     GPRReg valueReg = value.gpr();
2777     GPRReg prototypeReg = prototype.gpr();
2778     GPRReg scratchReg = scratch.gpr();
2779     GPRReg scratch2Reg = scratch2.gpr();
2780     
2781     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2782
2783     blessedBooleanResult(scratchReg, node);
2784 }
2785
2786 void SpeculativeJIT::compileValueAdd(Node* node)
2787 {
2788     if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) {
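        // At least one operand is statically known not to be a number, so no arithmetic
        // fast path applies; go straight to the generic slow path (ToPrimitive / string
        // concatenation and the like).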
2789         JSValueOperand left(this, node->child1());
2790         JSValueOperand right(this, node->child2());
2791         JSValueRegs leftRegs = left.jsValueRegs();
2792         JSValueRegs rightRegs = right.jsValueRegs();
2793 #if USE(JSVALUE64)
2794         GPRTemporary result(this);
2795         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2796 #else
2797         GPRTemporary resultTag(this);
2798         GPRTemporary resultPayload(this);
2799         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2800 #endif
2801         flushRegisters();
2802         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
2803         m_jit.exceptionCheck();
2804     
2805         jsValueResult(resultRegs, node);
2806         return;
2807     }
2808
2809     bool leftIsConstInt32 = node->child1()->isInt32Constant();
2810     bool rightIsConstInt32 = node->child2()->isInt32Constant();
2811
2812     // The DFG does not always fold the sum of two constant int32 operands together.
2813     if (leftIsConstInt32 && rightIsConstInt32) {
2814 #if USE(JSVALUE64)
2815         GPRTemporary result(this);
2816         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2817 #else
2818         GPRTemporary resultTag(this);
2819         GPRTemporary resultPayload(this);
2820         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2821 #endif
2822         int64_t leftConst = node->child1()->asInt32();
2823         int64_t rightConst = node->child2()->asInt32();
2824         int64_t resultConst = leftConst + rightConst;
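        // The sum is computed in 64 bits so that two int32 constants whose sum overflows
        // int32 (e.g. 0x7fffffff + 1) still produce the correct numeric constant; it is
        // boxed as an int32 when it fits and as a double otherwise.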
2825         m_jit.moveValue(JSValue(resultConst), resultRegs);
2826         jsValueResult(resultRegs, node);
2827         return;
2828     }
2829
2830     Optional<JSValueOperand> left;
2831     Optional<JSValueOperand> right;
2832
2833     JSValueRegs leftRegs;
2834     JSValueRegs rightRegs;
2835
2836     FPRTemporary leftNumber(this);
2837     FPRTemporary rightNumber(this);
2838     FPRReg leftFPR = leftNumber.fpr();
2839     FPRReg rightFPR = rightNumber.fpr();
2840
2841 #if USE(JSVALUE64)
2842     GPRTemporary result(this);
2843     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2844     GPRTemporary scratch(this);
2845     GPRReg scratchGPR = scratch.gpr();
2846     FPRReg scratchFPR = InvalidFPRReg;
2847 #else
2848     GPRTemporary resultTag(this);
2849     GPRTemporary resultPayload(this);
2850     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2851     GPRReg scratchGPR = resultTag.gpr();
2852     FPRTemporary fprScratch(this);
2853     FPRReg scratchFPR = fprScratch.fpr();
2854 #endif
2855
2856     SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
2857     SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
2858
2859     if (leftIsConstInt32)
2860         leftOperand.setConstInt32(node->child1()->asInt32());
2861     if (rightIsConstInt32)
2862         rightOperand.setConstInt32(node->child2()->asInt32());
2863
2864     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2865
2866     if (!leftOperand.isConst()) {
2867         left = JSValueOperand(this, node->child1());
2868         leftRegs = left->jsValueRegs();
2869     }
2870     if (!rightOperand.isConst()) {
2871         right = JSValueOperand(this, node->child2());
2872         rightRegs = right->jsValueRegs();
2873     }
2874
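    // The snippet generator emits the inline int32/double fast paths; any case it cannot
    // handle lands on slowPathJumpList and is finished by the operationValueAdd call below.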
2875     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
2876         leftFPR, rightFPR, scratchGPR, scratchFPR);
2877     gen.generateFastPath(m_jit);
2878
2879     ASSERT(gen.didEmitFastPath());
2880     gen.endJumpList().append(m_jit.jump());
2881
2882     gen.slowPathJumpList().link(&m_jit);
2883
2884     silentSpillAllRegisters(resultRegs);
2885
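    // A constant operand was never loaded into registers on the fast path, so materialize
    // it now. resultRegs is free to use as a staging register because everything live was
    // silently spilled above.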
2886     if (leftIsConstInt32) {
2887         leftRegs = resultRegs;
2888         int64_t leftConst = node->child1()->asInt32();
2889         m_jit.moveValue(JSValue(leftConst), leftRegs);
2890     } else if (rightIsConstInt32) {
2891         rightRegs = resultRegs;
2892         int64_t rightConst = node->child2()->asInt32();
2893         m_jit.moveValue(JSValue(rightConst), rightRegs);
2894     }
2895
2896     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
2897
2898     silentFillAllRegisters(resultRegs);
2899     m_jit.exceptionCheck();
2900
2901     gen.endJumpList().link(&m_jit);
2902     jsValueResult(resultRegs, node);
2903     return;
2904 }
2905
2906 void SpeculativeJIT::compileArithAdd(Node* node)
2907 {
2908     switch (node->binaryUseKind()) {
2909     case Int32Use: {
2910         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2911         
2912         if (node->child1()->isInt32Constant()) {
2913             int32_t imm1 = node->child1()->asInt32();
2914             SpeculateInt32Operand op2(this, node->child2());
2915             GPRTemporary result(this);
2916
2917             if (!shouldCheckOverflow(node->arithMode())) {
2918                 m_jit.move(op2.gpr(), result.gpr());
2919                 m_jit.add32(Imm32(imm1), result.gpr());
2920             } else
2921                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2922
2923             int32Result(result.gpr(), node);
2924             return;
2925         }
2926         
2927         if (node->child2()->isInt32Constant()) {
2928             SpeculateInt32Operand op1(this, node->child1());
2929             int32_t imm2 = node->child2()->asInt32();
2930             GPRTemporary result(this);
2931                 
2932             if (!shouldCheckOverflow(node->arithMode())) {
2933                 m_jit.move(op1.gpr(), result.gpr());
2934                 m_jit.add32(Imm32(imm2), result.gpr());
2935             } else
2936                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2937
2938             int32Result(result.gpr(), node);
2939             return;
2940         }
2941                 
2942         SpeculateInt32Operand op1(this, node->child1());
2943         SpeculateInt32Operand op2(this, node->child2());
2944         GPRTemporary result(this, Reuse, op1, op2);
2945
2946         GPRReg gpr1 = op1.gpr();
2947         GPRReg gpr2 = op2.gpr();
2948         GPRReg gprResult = result.gpr();
2949
2950         if (!shouldCheckOverflow(node->arithMode())) {
2951             if (gpr1 == gprResult)
2952                 m_jit.add32(gpr2, gprResult);
2953             else {
2954                 m_jit.move(gpr2, gprResult);
2955                 m_jit.add32(gpr1, gprResult);
2956             }
2957         } else {
2958             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2959                 
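            // If the result register aliases one of the operands, the wrapped sum has
            // clobbered that operand. The SpeculationRecovery tells the OSR exit machinery
            // to undo the add (subtract the other operand) so the original value can be
            // reconstructed; with no aliasing, no recovery is needed.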
2960             if (gpr1 == gprResult)
2961                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2962             else if (gpr2 == gprResult)
2963                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2964             else
2965                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2966         }
2967
2968         int32Result(gprResult, node);
2969         return;
2970     }
2971         
2972 #if USE(JSVALUE64)
2973     case Int52RepUse: {
2974         ASSERT(shouldCheckOverflow(node->arithMode()));
2975         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2976
2977         // Will we need an overflow check? If we can prove that neither input can be
2978         // Int52 then the overflow check will not be necessary.
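        // (Here "can be Int52" means "can take a value outside the int32 range": if both
        // inputs are known int32-sized, their sum fits in 33 bits and cannot overflow int52.)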
2979         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2980             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2981             SpeculateWhicheverInt52Operand op1(this, node->child1());
2982             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2983             GPRTemporary result(this, Reuse, op1);
2984             m_jit.move(op1.gpr(), result.gpr());
2985             m_jit.add64(op2.gpr(), result.gpr());
2986             int52Result(result.gpr(), node, op1.format());
2987             return;
2988         }
2989         
2990         SpeculateInt52Operand op1(this, node->child1());
2991         SpeculateInt52Operand op2(this, node->child2());
2992         GPRTemporary result(this);
2993         m_jit.move(op1.gpr(), result.gpr());
2994         speculationCheck(
2995             Int52Overflow, JSValueRegs(), 0,
2996             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2997         int52Result(result.gpr(), node);
2998         return;
2999     }
3000 #endif // USE(JSVALUE64)
3001     
3002     case DoubleRepUse: {
3003         SpeculateDoubleOperand op1(this, node->child1());
3004         SpeculateDoubleOperand op2(this, node->child2());
3005         FPRTemporary result(this, op1, op2);
3006
3007         FPRReg reg1 = op1.fpr();
3008         FPRReg reg2 = op2.fpr();
3009         m_jit.addDouble(reg1, reg2, result.fpr());
3010
3011         doubleResult(result.fpr(), node);
3012         return;
3013     }
3014         
3015     default:
3016         RELEASE_ASSERT_NOT_REACHED();
3017         break;
3018     }
3019 }
3020
3021 void SpeculativeJIT::compileMakeRope(Node* node)
3022 {
3023     ASSERT(node->child1().useKind() == KnownStringUse);
3024     ASSERT(node->child2().useKind() == KnownStringUse);
3025     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3026     
3027     SpeculateCellOperand op1(this, node->child1());
3028     SpeculateCellOperand op2(this, node->child2());
3029     SpeculateCellOperand op3(this, node->child3());
3030     GPRTemporary result(this);
3031     GPRTemporary allocator(this);
3032     GPRTemporary scratch(this);
3033     
3034     GPRReg opGPRs[3];
3035     unsigned numOpGPRs;
3036     opGPRs[0] = op1.gpr();
3037     opGPRs[1] = op2.gpr();
3038     if (node->child3()) {
3039         opGPRs[2] = op3.gpr();
3040         numOpGPRs = 3;
3041     } else {
3042         opGPRs[2] = InvalidGPRReg;
3043         numOpGPRs = 2;
3044     }
3045     GPRReg resultGPR = result.gpr();
3046     GPRReg allocatorGPR = allocator.gpr();
3047     GPRReg scratchGPR = scratch.gpr();
3048     
3049     JITCompiler::JumpList slowPath;
3050     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3051     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3052     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3053         
3054     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3055     for (unsigned i = 0; i < numOpGPRs; ++i)
3056         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3057     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3058         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
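    // Compute the rope's flags and length. The Is8Bit flag survives only if every fiber
    // is 8-bit (hence the and32 of each fiber's flags), and the length is the checked sum
    // of the fiber lengths, reusing allocatorGPR as the accumulator now that the allocator
    // pointer is no longer needed.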
3059     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3060     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3061     if (!ASSERT_DISABLED) {
3062         JITCompiler::Jump ok = m_jit.branch32(
3063             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3064         m_jit.abortWithReason(DFGNegativeStringLength);
3065         ok.link(&m_jit);
3066     }
3067     for (unsigned i = 1; i < numOpGPRs; ++i) {
3068         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3069         speculationCheck(
3070             Uncountable, JSValueSource(), nullptr,
3071             m_jit.branchAdd32(
3072                 JITCompiler::Overflow,
3073                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3074     }
3075     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3076     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3077     if (!ASSERT_DISABLED) {
3078         JITCompiler::Jump ok = m_jit.branch32(
3079             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3080         m_jit.abortWithReason(DFGNegativeStringLength);
3081         ok.link(&m_jit);
3082     }
3083     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3084     
3085     switch (numOpGPRs) {
3086     case 2:
3087         addSlowPathGenerator(slowPathCall(
3088             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3089         break;
3090     case 3:
3091         addSlowPathGenerator(slowPathCall(
3092             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3093         break;
3094     default:
3095         RELEASE_ASSERT_NOT_REACHED();
3096         break;
3097     }
3098         
3099     cellResult(resultGPR, node);
3100 }
3101
3102 void SpeculativeJIT::compileArithClz32(Node* node)
3103 {
3104     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
3105     SpeculateInt32Operand value(this, node->child1());
3106     GPRTemporary result(this, Reuse, value);
3107     GPRReg valueReg = value.gpr();
3108     GPRReg resultReg = result.gpr();
3109     m_jit.countLeadingZeros32(valueReg, resultReg);
3110     int32Result(resultReg, node);
3111 }
3112
3113 void SpeculativeJIT::compileArithSub(Node* node)
3114 {
3115     switch (node->binaryUseKind()) {
3116     case Int32Use: {
3117         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3118         
3119         if (node->child2()->isInt32Constant()) {
3120             SpeculateInt32Operand op1(this, node->child1());
3121             int32_t imm2 = node->child2()->asInt32();
3122             GPRTemporary result(this);
3123
3124             if (!shouldCheckOverflow(node->arithMode())) {
3125                 m_jit.move(op1.gpr(), result.gpr());
3126                 m_jit.sub32(Imm32(imm2), result.gpr());
3127             } else {
3128                 GPRTemporary scratch(this);
3129                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3130             }
3131
3132             int32Result(result.gpr(), node);
3133             return;
3134         }
3135             
3136         if (node->child1()->isInt32Constant()) {
3137             int32_t imm1 = node->child1()->asInt32();
3138             SpeculateInt32Operand op2(this, node->child2());
3139             GPRTemporary result(this);
3140                 
3141             m_jit.move(Imm32(imm1), result.gpr());
3142             if (!shouldCheckOverflow(node->arithMode()))
3143                 m_jit.sub32(op2.gpr(), result.gpr());
3144             else
3145                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3146                 
3147             int32Result(result.gpr(), node);
3148             return;
3149         }
3150             
3151         SpeculateInt32Operand op1(this, node->child1());
3152         SpeculateInt32Operand op2(this, node->child2());
3153         GPRTemporary result(this);
3154
3155         if (!shouldCheckOverflow(node->arithMode())) {
3156             m_jit.move(op1.gpr(), result.gpr());
3157             m_jit.sub32(op2.gpr(), result.gpr());
3158         } else
3159             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3160
3161         int32Result(result.gpr(), node);
3162         return;
3163     }
3164         
3165 #if USE(JSVALUE64)
3166     case Int52RepUse: {
3167         ASSERT(shouldCheckOverflow(node->arithMode()));
3168         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3169
3170         // Will we need an overflow check? If we can prove that neither input can be
3171         // Int52 then the overflow check will not be necessary.
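        // (Same reasoning as in compileArithAdd: if both inputs are known to fit in int32,
        // the difference fits in 33 bits and cannot overflow int52.)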
3172         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3173             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3174             SpeculateWhicheverInt52Operand op1(this, node->child1());
3175             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3176             GPRTemporary result(this, Reuse, op1);
3177             m_jit.move(op1.gpr(), result.gpr());
3178             m_jit.sub64(op2.gpr(), result.gpr());
3179             int52Result(result.gpr(), node, op1.format());
3180             return;
3181         }
3182         
3183         SpeculateInt52Operand op1(this, node->child1());
3184         SpeculateInt52Operand op2(this, node->child2());
3185         GPRTemporary result(this);
3186         m_jit.move(op1.gpr(), result.gpr());
3187         speculationCheck(
3188             Int52Overflow, JSValueRegs(), 0,
3189             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3190         int52Result(result.gpr(), node);
3191         return;
3192     }
3193 #endif // USE(JSVALUE64)
3194
3195     case DoubleRepUse: {
3196         SpeculateDoubleOperand op1(this, node->child1());
3197         SpeculateDoubleOperand op2(this, node->child2());
3198         FPRTemporary result(this, op1);
3199
3200         FPRReg reg1 = op1.fpr();
3201         FPRReg reg2 = op2.fpr();
3202         m_jit.subDouble(reg1, reg2, result.fpr());
3203
3204         doubleResult(result.fpr(), node);
3205         return;
3206     }
3207
3208     case UntypedUse: {
3209         JSValueOperand left(this, node->child1());
3210         JSValueOperand right(this, node->child2());
3211
3212         JSValueRegs leftRegs = left.jsValueRegs();
3213         JSValueRegs rightRegs = right.jsValueRegs();
3214
3215         FPRTemporary leftNumber(this);
3216         FPRTemporary rightNumber(this);
3217         FPRReg leftFPR = leftNumber.fpr();
3218         FPRReg rightFPR = rightNumber.fpr();
3219
3220 #if USE(JSVALUE64)
3221         GPRTemporary result(this);
3222         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3223         GPRTemporary scratch(this);
3224         GPRReg scratchGPR = scratch.gpr();
3225         FPRReg scratchFPR = InvalidFPRReg;
3226 #else
3227         GPRTemporary resultTag(this);
3228         GPRTemporary resultPayload(this);
3229         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3230         GPRReg scratchGPR = resultTag.gpr();
3231         FPRTemporary fprScratch(this);
3232         FPRReg scratchFPR = fprScratch.fpr();
3233 #endif
3234
3235         SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
3236         SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
3237
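        // Same structure as compileValueAdd: the snippet generator emits the inline
        // int32/double fast paths and anything else falls through to operationValueSub.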
3238         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3239             leftFPR, rightFPR, scratchGPR, scratchFPR);
3240         gen.generateFastPath(m_jit);
3241
3242         ASSERT(gen.didEmitFastPath());
3243         gen.endJumpList().append(m_jit.jump());
3244
3245         gen.slowPathJumpList().link(&m_jit);
3246         silentSpillAllRegisters(resultRegs);
3247         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3248         silentFillAllRegisters(resultRegs);
3249         m_jit.exceptionCheck();
3250
3251         gen.endJumpList().link(&m_jit);
3252         jsValueResult(resultRegs, node);
3253         return;
3254     }
3255
3256     default:
3257         RELEASE_ASSERT_NOT_REACHED();
3258         return;
3259     }
3260 }
3261
3262 void SpeculativeJIT::compileArithNegate(Node* node)
3263 {
3264     switch (node->child1().useKind()) {
3265     case Int32Use: {
3266         SpeculateInt32Operand op1(this, node->child1());
3267         GPRTemporary result(this);
3268
3269         m_jit.move(op1.gpr(), result.gpr());
3270
3271         // Note: there is no notion of being not used as a number, but someone
3272         // caring about negative zero.
3273         
3274         if (!shouldCheckOverflow(node->arithMode()))
3275             m_jit.neg32(result.gpr());
3276         else if (!shouldCheckNegativeZero(node->arithMode()))
3277             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3278         else {
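            // result & 0x7fffffff is zero only for 0 and INT32_MIN: negating the former
            // would produce negative zero and negating the latter would overflow, so a
            // single test covers both speculation failures.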
3279             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3280             m_jit.neg32(result.gpr());
3281         }
3282
3283         int32Result(result.gpr(), node);
3284         return;
3285     }
3286
3287 #if USE(JSVALUE64)
3288     case Int52RepUse: {
3289         ASSERT(shouldCheckOverflow(node->arithMode()));
3290         
3291         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3292             SpeculateWhicheverInt52Operand op1(this, node->child1());
3293             GPRTemporary result(this);
3294             GPRReg op1GPR = op1.gpr();
3295             GPRReg resultGPR = result.gpr();
3296             m_jit.move(op1GPR, resultGPR);
3297             m_jit.neg64(resultGPR);
3298             if (shouldCheckNegativeZero(node->arithMode())) {
3299                 speculationCheck(
3300                     NegativeZero, JSValueRegs(), 0,
3301                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3302             }
3303             int52Result(resultGPR, node, op1.format());
3304             return;
3305         }
3306         
3307         SpeculateInt52Operand op1(this, node->child1());
3308         GPRTemporary result(this);
3309         GPRReg op1GPR = op1.gpr();
3310         GPRReg resultGPR = result.gpr();
3311         m_jit.move(op1GPR, resultGPR);
3312         speculationCheck(
3313             Int52Overflow, JSValueRegs(), 0,
3314             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3315         if (shouldCheckNegativeZero(node->arithMode())) {
3316             speculationCheck(
3317                 NegativeZero, JSValueRegs(), 0,
3318                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3319         }
3320         int52Result(resultGPR, node);
3321         return;
3322     }
3323 #endif // USE(JSVALUE64)
3324         
3325     case DoubleRepUse: {
3326         SpeculateDoubleOperand op1(this, node->child1());
3327         FPRTemporary result(this);
3328         
3329         m_jit.negateDouble(op1.fpr(), result.fpr());
3330         
3331         doubleResult(result.fpr(), node);
3332         return;
3333     }
3334         
3335     default:
3336         RELEASE_ASSERT_NOT_REACHED();
3337         return;
3338     }
3339 }
3340 void SpeculativeJIT::compileArithMul(Node* node)
3341 {
3342     switch (node->binaryUseKind()) {
3343     case Int32Use: {
3344         SpeculateInt32Operand op1(this, node->child1());
3345         SpeculateInt32Operand op2(this, node->child2());
3346         GPRTemporary result(this);
3347
3348         GPRReg reg1 = op1.gpr();
3349         GPRReg reg2 = op2.gpr();
3350
3351         // We can perform truncated multiplications if we get to this point, because if the
3352         // fixup phase could not prove that it would be safe, it would have turned us into
3353         // a double multiplication.
3354         if (!shouldCheckOverflow(node->arithMode())) {
3355             m_jit.move(reg1, result.gpr());
3356             m_jit.mul32(reg2, result.gpr());
3357         } else {
3358             speculationCheck(
3359                 Overflow, JSValueRegs(), 0,
3360                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3361         }
3362             
3363         // Check for negative zero, if the users of this node care about such things.
3364         if (shouldCheckNegativeZero(node->arithMode())) {
3365             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3366             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3367             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3368             resultNonZero.link(&m_jit);
3369         }
3370
3371         int32Result(result.gpr(), node);
3372         return;
3373     }
3374     
3375 #if USE(JSVALUE64)   
3376     case Int52RepUse: {
3377         ASSERT(shouldCheckOverflow(node->arithMode()));
3378         
3379         // This is super clever. We want to do an int52 multiplication and check the
3380         // int52 overflow bit. There is no direct hardware support for this, but we do
3381         // have the ability to do an int64 multiplication and check the int64 overflow
3382         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3383         // registers, with the high 12 bits being sign-extended. We can do:
3384         //
3385         //     (a * (b << 12))
3386         //
3387         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3388         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3389         // multiplication overflows is identical to whether the 'a * b' 52-bit
3390         // multiplication overflows.
3391         //
3392         // In our nomenclature, this is:
3393         //
3394         //     strictInt52(a) * int52(b) => int52
3395         //
3396         // That is, "strictInt52" means unshifted and "int52" means left-shifted by 12
3397         // bits.
3398         //
3399         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3400         // we just do whatever is more convenient for op1 and have op2 do the
3401         // opposite. This ensures that we do at most one shift.
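        // For example: with a = 2^50 and b = 4, a * b = 2^52, which is outside the int52
        // range (at most 2^51 - 1). The emitted multiply computes a * (b << 12) =
        // 2^50 * 2^14 = 2^64, which overflows int64, so the hardware overflow flag fires
        // exactly when the int52 multiplication overflows.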
3402
3403         SpeculateWhicheverInt52Operand op1(this, node->child1());
3404         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3405         GPRTemporary result(this);
3406         
3407         GPRReg op1GPR = op1.gpr();
3408         GPRReg op2GPR = op2.gpr();
3409         GPRReg resultGPR = result.gpr();
3410         
3411         m_jit.move(op1GPR, resultGPR);
3412         speculationCheck(
3413             Int52Overflow, JSValueRegs(), 0,
3414             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3415         
3416         if (shouldCheckNegativeZero(node->arithMode())) {
3417             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3418                 MacroAssembler::NonZero, resultGPR);
3419             speculationCheck(
3420                 NegativeZero, JSValueRegs(), 0,
3421                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3422             speculationCheck(
3423                 NegativeZero, JSValueRegs(), 0,
3424                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3425             resultNonZero.link(&m_jit);
3426         }
3427         
3428         int52Result(resultGPR, node);
3429         return;
3430     }
3431 #endif // USE(JSVALUE64)
3432         
3433     case DoubleRepUse: {
3434         SpeculateDoubleOperand op1(this, node->child1());
3435         SpeculateDoubleOperand op2(this, node->child2());
3436         FPRTemporary result(this, op1, op2);
3437         
3438         FPRReg reg1 = op1.fpr();
3439         FPRReg reg2 = op2.fpr();
3440         
3441         m_jit.mulDouble(reg1, reg2, result.fpr());
3442         
3443         doubleResult(result.fpr(), node);
3444         return;
3445     }
3446         
3447     default:
3448         RELEASE_ASSERT_NOT_REACHED();
3449         return;
3450     }
3451 }
3452
3453 void SpeculativeJIT::compileArithDiv(Node* node)
3454 {
3455     switch (node->binaryUseKind()) {
3456     case Int32Use: {
3457 #if CPU(X86) || CPU(X86_64)
3458         SpeculateInt32Operand op1(this, node->child1());
3459         SpeculateInt32Operand op2(this, node->child2());
3460         GPRTemporary eax(this, X86Registers::eax);
3461         GPRTemporary edx(this, X86Registers::edx);
3462         GPRReg op1GPR = op1.gpr();
3463         GPRReg op2GPR = op2.gpr();
3464     
3465         GPRReg op2TempGPR;
3466         GPRReg temp;
3467         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3468             op2TempGPR = allocate();
3469             temp = op2TempGPR;
3470         } else {
3471             op2TempGPR = InvalidGPRReg;
3472             if (op1GPR == X86Registers::eax)
3473                 temp = X86Registers::edx;
3474             else
3475                 temp = X86Registers::eax;
3476         }
3477     
3478         ASSERT(temp != op1GPR);
3479         ASSERT(temp != op2GPR);
3480     
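        // The unsigned compare below filters out both problem denominators in one branch:
        // op2 + 1 is 0 when op2 == -1 and 1 when op2 == 0, and "Above" is an unsigned
        // comparison, so only those two values fall through to the special-case handling.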
3481         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3482     
3483         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3484     
3485         JITCompiler::JumpList done;
3486         if (shouldCheckOverflow(node->arithMode())) {
3487             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3488             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3489         } else {
3490             // This is the case where we convert the result to an int after we're done, and we
3491             // already know that the denominator is either -1 or 0. So, if the denominator is
3492             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3493             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3494             // are happy to fall through to a normal division, since we're just dividing
3495             // something by negative 1.
3496         
3497             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3498             m_jit.move(TrustedImm32(0), eax.gpr());
3499             done.append(m_jit.jump());
3500         
3501             notZero.link(&m_jit);
3502             JITCompiler::Jump notNeg2ToThe31 =
3503                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3504             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3505             done.append(m_jit.jump());
3506         
3507             notNeg2ToThe31.link(&m_jit);
3508         }
3509     
3510         safeDenominator.link(&m_jit);
3511     
3512         // If the user cares about negative zero, then speculate that we're not about
3513         // to produce negative zero.
3514         if (shouldCheckNegativeZero(node->arithMode())) {
3515             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3516             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3517             numeratorNonZero.link(&m_jit);
3518         }
3519     
3520         if (op2TempGPR != InvalidGPRReg) {
3521             m_jit.move(op2GPR, op2TempGPR);
3522             op2GPR = op2TempGPR;
3523         }
3524             
3525         m_jit.move(op1GPR, eax.gpr());
3526         m_jit.x86ConvertToDoubleWord32();
3527         m_jit.x86Div32(op2GPR);
3528             
3529         if (op2TempGPR != InvalidGPRReg)
3530             unlock(op2TempGPR);
3531
3532         // Check that there was no remainder. If there had been, then we'd be obligated to
3533         // produce a double result instead.
3534         if (shouldCheckOverflow(node->arithMode()))
3535             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3536         
3537         done.link(&m_jit);
3538         int32Result(eax.gpr(), node);
3539 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3540         SpeculateInt32Operand op1(this, node->child1());
3541         SpeculateInt32Operand op2(this, node->child2());
3542         GPRReg op1GPR = op1.gpr();
3543         GPRReg op2GPR = op2.gpr();
3544         GPRTemporary quotient(this);
3545         GPRTemporary multiplyAnswer(this);
3546
3547         // If the user cares about negative zero, then speculate that we're not about
3548         // to produce negative zero.
3549         if (shouldCheckNegativeZero(node->arithMode())) {
3550             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3551             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3552             numeratorNonZero.link(&m_jit);
3553         }
3554
3555         if (shouldCheckOverflow(node->arithMode()))
3556             speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR));
3557
3558         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3559
3560         // Check that there was no remainder. If there had been, then we'd be obligated to
3561         // produce a double result instead.
3562         if (shouldCheckOverflow(node->arithMode())) {
3563             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3564             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3565         }
3566
3567         int32Result(quotient.gpr(), node);
3568 #else
3569         RELEASE_ASSERT_NOT_REACHED();
3570 #endif
3571         break;
3572     }
3573         
3574     case DoubleRepUse: {
3575         SpeculateDoubleOperand op1(this, node->child1());
3576         SpeculateDoubleOperand op2(this, node->child2());
3577         FPRTemporary result(this, op1);
3578         
3579         FPRReg reg1 = op1.fpr();
3580         FPRReg reg2 = op2.fpr();
3581         m_jit.divDouble(reg1, reg2, result.fpr());
3582         
3583         doubleResult(result.fpr(), node);
3584         break;
3585     }
3586         
3587     default:
3588         RELEASE_ASSERT_NOT_REACHED();
3589         break;
3590     }
3591 }
3592
3593 void SpeculativeJIT::compileArithMod(Node* node)
3594 {
3595     switch (node->binaryUseKind()) {
3596     case Int32Use: {
3597         // In the fast path, the dividend value could be the final result
3598         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3599         SpeculateStrictInt32Operand op1(this, node->child1());
3600         
3601         if (node->child2()->isInt32Constant()) {
3602             int32_t divisor = node->child2()->asInt32();
3603             if (divisor > 1 && hasOneBitSet(divisor)) {
3604                 unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor));
3605                 GPRReg dividendGPR = op1.gpr();
3606                 GPRTemporary result(this);
3607                 GPRReg resultGPR = result.gpr();
3608
3609                 // This is what LLVM generates. It's pretty crazy. Here's my
3610                 // attempt at understanding it.
3611                 
3612                 // First, compute either divisor - 1, or 0, depending on whether
3613                 // the dividend is negative:
3614                 //
3615                 // If dividend < 0:  resultGPR = divisor - 1
3616                 // If dividend >= 0: resultGPR = 0
3617                 m_jit.move(dividendGPR, resultGPR);
3618                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3619                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3620                 
3621                 // Add in the dividend, so that:
3622                 //
3623                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3624                 // If dividend >= 0: resultGPR = dividend
3625                 m_jit.add32(dividendGPR, resultGPR);
3626                 
3627                 // Mask so as to only get the *high* bits. This rounds down
3628                 // (towards negative infinity) resultGPR to the nearest multiple
3629                 // of divisor, so that:
3630                 //
3631                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3632                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3633                 //
3634                 // Note that this can be simplified to:
3635                 //
3636                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3637                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3638                 //
3639                 // Note that if the dividend is negative, resultGPR will also be negative.
3640                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3641                 // zero, because of how things are conditionalized.
3642                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3643                 
3644                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3645                 //
3646                 // resultGPR = dividendGPR - resultGPR
3647                 m_jit.neg32(resultGPR);
3648                 m_jit.add32(dividendGPR, resultGPR);
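                // Worked example with divisor = 4 (logarithm = 2, -divisor = 0xFFFFFFFC):
                //   dividend = -7: -7 >> 31 = -1; 0xFFFFFFFF >>> 30 = 3; 3 + (-7) = -4;
                //                  -4 & -4 = -4; negate and add the dividend: 4 + (-7) = -3 == -7 % 4.
                //   dividend =  7:  7 >> 31 = 0;  0 >>> 30 = 0;  0 + 7 = 7;
                //                  7 & -4 = 4;    negate and add the dividend: -4 + 7 = 3 == 7 % 4.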
3649                 
3650                 if (shouldCheckNegativeZero(node->arithMode())) {
3651                     // Check that we're not about to create negative zero.
3652                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3653                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3654                     numeratorPositive.link(&m_jit);
3655                 }
3656
3657                 int32Result(resultGPR, node);
3658                 return;
3659             }
3660         }
3661         
3662 #if CPU(X86) || CPU(X86_64)
3663         if (node->child2()->isInt32Constant()) {
3664             int32_t divisor = node->child2()->asInt32();
3665             if (divisor && divisor != -1) {
3666                 GPRReg op1Gpr = op1.gpr();
3667
3668                 GPRTemporary eax(this, X86Registers::eax);
3669                 GPRTemporary edx(this, X86Registers::edx);
3670                 GPRTemporary scratch(this);
3671                 GPRReg scratchGPR = scratch.gpr();
3672
3673                 GPRReg op1SaveGPR;
3674                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3675                     op1SaveGPR = allocate();
3676                     ASSERT(op1Gpr != op1SaveGPR);
3677                     m_jit.move(op1Gpr, op1SaveGPR);
3678                 } else
3679                     op1SaveGPR = op1Gpr;
3680                 ASSERT(op1SaveGPR != X86Registers::eax);
3681                 ASSERT(op1SaveGPR != X86Registers::edx);
3682
3683                 m_jit.move(op1Gpr, eax.gpr());
3684                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3685                 m_jit.x86ConvertToDoubleWord32();
3686                 m_jit.x86Div32(scratchGPR);
3687                 if (shouldCheckNegativeZero(node->arithMode())) {
3688                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3689                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3690                     numeratorPositive.link(&m_jit);
3691                 }
3692             
3693                 if (op1SaveGPR != op1Gpr)
3694                     unlock(op1SaveGPR);
3695
3696                 int32Result(edx.gpr(), node);
3697                 return;
3698             }
3699         }
3700 #endif
3701
3702         SpeculateInt32Operand op2(this, node->child2());
3703 #if CPU(X86) || CPU(X86_64)
3704         GPRTemporary eax(this, X86Registers::eax);
3705         GPRTemporary edx(this, X86Registers::edx);
3706         GPRReg op1GPR = op1.gpr();
3707         GPRReg op2GPR = op2.gpr();
3708     
3709         GPRReg op2TempGPR;
3710         GPRReg temp;
3711         GPRReg op1SaveGPR;
3712     
3713         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3714             op2TempGPR = allocate();
3715             temp = op2TempGPR;
3716         } else {
3717             op2TempGPR = InvalidGPRReg;
3718             if (op1GPR == X86Registers::eax)
3719                 temp = X86Registers::edx;
3720             else
3721                 temp = X86Registers::eax;
3722         }
3723     
3724         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3725             op1SaveGPR = allocate();
3726             ASSERT(op1GPR != op1SaveGPR);
3727             m_jit.move(op1GPR, op1SaveGPR);
3728         } else
3729             op1SaveGPR = op1GPR;
3730     
3731         ASSERT(temp != op1GPR);
3732         ASSERT(temp != op2GPR);
3733         ASSERT(op1SaveGPR != X86Registers::eax);
3734         ASSERT(op1SaveGPR != X86Registers::edx);
3735     
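        // Same unsigned (op2 + 1) > 1 trick as in compileArithDiv: only op2 == 0 and
        // op2 == -1 fall through to the special-case handling below.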
3736         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3737     
3738         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3739     
3740         JITCompiler::JumpList done;
3741         
3742         // FIXME: -2^31 % -1 will actually yield negative zero here, so we could have a
3743         // separate case for that. But it probably doesn't matter so much.
3744         if (shouldCheckOverflow(node->arithMode())) {
3745             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3746             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3747         } else {
3748             // This is the case where we convert the result to an int after we're done, and we
3749             // already know that the denominator is either -1 or 0. So, if the denominator is
3750             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3751             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3752             // happy to fall through to a normal division, since we're just dividing something
3753             // by negative 1.
3754         
3755             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3756             m_jit.move(TrustedImm32(0), edx.gpr());
3757             done.append(m_jit.jump());
3758         
3759             notZero.link(&m_jit);
3760             JITCompiler::Jump notNeg2ToThe31 =
3761                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3762             m_jit.move(TrustedImm32(0), edx.gpr());
3763             done.append(m_jit.jump());
3764         
3765             notNeg2ToThe31.link(&m_jit);
3766         }
3767         
3768         safeDenominator.link(&m_jit);
3769             
3770         if (op2TempGPR != InvalidGPRReg) {
3771             m_jit.move(op2GPR, op2TempGPR);
3772             op2GPR = op2TempGPR;
3773         }
3774             
3775         m_jit.move(op1GPR, eax.gpr());
3776         m_jit.x86ConvertToDoubleWord32();
3777         m_jit.x86Div32(op2GPR);
3778             
3779         if (op2TempGPR != InvalidGPRReg)
3780             unlock(op2TempGPR);
3781
3782         // Check that we're not about to create negative zero.
3783         if (shouldCheckNegativeZero(node->arithMode())) {
3784             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3785             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3786             numeratorPositive.link(&m_jit);
3787         }
3788     
3789         if (op1SaveGPR != op1GPR)
3790             unlock(op1SaveGPR);
3791             
3792         done.link(&m_jit);
3793         int32Result(edx.gpr(), node);
3794
3795 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3796         GPRTemporary temp(this);
3797         GPRTemporary quotientThenRemainder(this);
3798         GPRTemporary multiplyAnswer(this);
3799         GPRReg dividendGPR = op1.gpr();
3800         GPRReg divisorGPR = op2.gpr();