Polymorphic operand types for DFG and FTL mul.
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITMulGenerator.h"
43 #include "JITSubGenerator.h"
44 #include "JSArrowFunction.h"
45 #include "JSCInlines.h"
46 #include "JSEnvironmentRecord.h"
47 #include "JSLexicalEnvironment.h"
48 #include "LinkBuffer.h"
49 #include "ScopedArguments.h"
50 #include "ScratchRegisterAllocator.h"
51 #include "WriteBarrierBuffer.h"
52 #include <wtf/MathExtras.h>
53
54 namespace JSC { namespace DFG {
55
56 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
57     : m_compileOkay(true)
58     , m_jit(jit)
59     , m_currentNode(0)
60     , m_lastGeneratedNode(LastNodeType)
61     , m_indexInBlock(0)
62     , m_generationInfo(m_jit.graph().frameRegisterCount())
63     , m_state(m_jit.graph())
64     , m_interpreter(m_jit.graph(), m_state)
65     , m_stream(&jit.jitCode()->variableEventStream)
66     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
67 {
68 }
69
70 SpeculativeJIT::~SpeculativeJIT()
71 {
72 }
73
74 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
75 {
76     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
77     
78     GPRTemporary scratch(this);
79     GPRTemporary scratch2(this);
80     GPRReg scratchGPR = scratch.gpr();
81     GPRReg scratch2GPR = scratch2.gpr();
82     
83     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
84     
85     JITCompiler::JumpList slowCases;
86     
87     slowCases.append(
88         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
89     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
90     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
91     
92     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
93     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
94     
95     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
96 #if USE(JSVALUE64)
97         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
98         for (unsigned i = numElements; i < vectorLength; ++i)
99             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
100 #else
101         EncodedValueDescriptor value;
102         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
103         for (unsigned i = numElements; i < vectorLength; ++i) {
104             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
105             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
106         }
107 #endif
108     }
109     
110     // I want a slow path that also loads out the storage pointer, and that's
111     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
112     // of work for a very small piece of functionality. :-/
113     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
114         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
115         structure, numElements));
116 }
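// Note on the fast path above (a reading of the code, not an authoritative spec):
// as used here, emitAllocateBasicStorage hands back a pointer to the end of the
// new block, so subtracting vectorLength * sizeof(JSValue) yields the butterfly
// pointer, which by convention points just past the IndexingHeader:
//
//     [ IndexingHeader ][ element 0 ][ element 1 ] ...
//                       ^ butterfly / storageGPR
//
// For double arrays, the unused tail of the vector is pre-filled with PNaN, the
// hole marker for the double indexing shape.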
117
118 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
119 {
120     if (inlineCallFrame && !inlineCallFrame->isVarargs())
121         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
122     else {
123         VirtualRegister argumentCountRegister;
124         if (!inlineCallFrame)
125             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
126         else
127             argumentCountRegister = inlineCallFrame->argumentCountRegister;
128         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
129         if (!includeThis)
130             m_jit.sub32(TrustedImm32(1), lengthGPR);
131     }
132 }
133
134 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
135 {
136     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
137 }
138
139 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
140 {
141     if (origin.inlineCallFrame) {
142         if (origin.inlineCallFrame->isClosureCall) {
143             m_jit.loadPtr(
144                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
145                 calleeGPR);
146         } else {
147             m_jit.move(
148                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
149                 calleeGPR);
150         }
151     } else
152         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
153 }
154
155 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
156 {
157     m_jit.addPtr(
158         TrustedImm32(
159             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
160         GPRInfo::callFrameRegister, startGPR);
161 }
162
163 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
164 {
165     if (!doOSRExitFuzzing())
166         return MacroAssembler::Jump();
167     
168     MacroAssembler::Jump result;
169     
170     m_jit.pushToSave(GPRInfo::regT0);
171     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
172     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
173     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
174     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
175     unsigned at = Options::fireOSRExitFuzzAt();
176     if (at || atOrAfter) {
177         unsigned threshold;
178         MacroAssembler::RelationalCondition condition;
179         if (atOrAfter) {
180             threshold = atOrAfter;
181             condition = MacroAssembler::Below;
182         } else {
183             threshold = at;
184             condition = MacroAssembler::NotEqual;
185         }
186         MacroAssembler::Jump ok = m_jit.branch32(
187             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
188         m_jit.popToRestore(GPRInfo::regT0);
189         result = m_jit.jump();
190         ok.link(&m_jit);
191     }
192     m_jit.popToRestore(GPRInfo::regT0);
193     
194     return result;
195 }
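// OSR exit fuzzing note: when fuzzing is enabled, each speculation check first
// bumps the global g_numberOfOSRExitFuzzChecks counter. The check then fires an
// unconditional exit either exactly at fireOSRExitFuzzAt or at and beyond
// fireOSRExitFuzzAtOrAfter, even though the speculated condition held. The
// thresholds come from the usual JSC options machinery (presumably settable as,
// e.g., JSC_fireOSRExitFuzzAt=<n> in the environment).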
196
197 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
198 {
199     if (!m_compileOkay)
200         return;
201     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
202     if (fuzzJump.isSet()) {
203         JITCompiler::JumpList jumpsToFail;
204         jumpsToFail.append(fuzzJump);
205         jumpsToFail.append(jumpToFail);
206         m_jit.appendExitInfo(jumpsToFail);
207     } else
208         m_jit.appendExitInfo(jumpToFail);
209     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
210 }
211
212 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
213 {
214     if (!m_compileOkay)
215         return;
216     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
217     if (fuzzJump.isSet()) {
218         JITCompiler::JumpList myJumpsToFail;
219         myJumpsToFail.append(jumpsToFail);
220         myJumpsToFail.append(fuzzJump);
221         m_jit.appendExitInfo(myJumpsToFail);
222     } else
223         m_jit.appendExitInfo(jumpsToFail);
224     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
225 }
226
227 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
228 {
229     if (!m_compileOkay)
230         return OSRExitJumpPlaceholder();
231     unsigned index = m_jit.jitCode()->osrExit.size();
232     m_jit.appendExitInfo();
233     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
234     return OSRExitJumpPlaceholder(index);
235 }
236
237 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
238 {
239     return speculationCheck(kind, jsValueSource, nodeUse.node());
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
243 {
244     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
245 }
246
247 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
248 {
249     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
250 }
251
252 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
253 {
254     if (!m_compileOkay)
255         return;
256     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
257     m_jit.appendExitInfo(jumpToFail);
258     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
259 }
260
261 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
262 {
263     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
264 }
265
266 void SpeculativeJIT::emitInvalidationPoint(Node* node)
267 {
268     if (!m_compileOkay)
269         return;
270     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
271     m_jit.jitCode()->appendOSRExit(OSRExit(
272         UncountableInvalidation, JSValueSource(),
273         m_jit.graph().methodOfGettingAValueProfileFor(node),
274         this, m_stream->size()));
275     info.m_replacementSource = m_jit.watchpointLabel();
276     ASSERT(info.m_replacementSource.isSet());
277     noResult(node);
278 }
279
280 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
281 {
282     if (!m_compileOkay)
283         return;
284     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
285     m_compileOkay = false;
286     if (verboseCompilationEnabled())
287         dataLog("Bailing compilation.\n");
288 }
289
290 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
291 {
292     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
293 }
294
295 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
296 {
297     ASSERT(needsTypeCheck(edge, typesPassedThrough));
298     m_interpreter.filter(edge, typesPassedThrough);
299     speculationCheck(BadType, source, edge.node(), jumpToFail);
300 }
301
302 RegisterSet SpeculativeJIT::usedRegisters()
303 {
304     RegisterSet result;
305     
306     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
307         GPRReg gpr = GPRInfo::toRegister(i);
308         if (m_gprs.isInUse(gpr))
309             result.set(gpr);
310     }
311     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
312         FPRReg fpr = FPRInfo::toRegister(i);
313         if (m_fprs.isInUse(fpr))
314             result.set(fpr);
315     }
316     
317     result.merge(RegisterSet::stubUnavailableRegisters());
318     
319     return result;
320 }
321
322 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
323 {
324     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
325 }
326
327 void SpeculativeJIT::runSlowPathGenerators()
328 {
329     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
330         m_slowPathGenerators[i]->generate(this);
331 }
332
333 // On Windows we need to wrap fmod; on other platforms we can call it directly.
334 // On ARMv7 we assert that all function pointers have the low bit set (i.e., they point to Thumb code).
335 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
336 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
337 {
338     return fmod(x, y);
339 }
340 #else
341 #define fmodAsDFGOperation fmod
342 #endif
343
344 void SpeculativeJIT::clearGenerationInfo()
345 {
346     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
347         m_generationInfo[i] = GenerationInfo();
348     m_gprs = RegisterBank<GPRInfo>();
349     m_fprs = RegisterBank<FPRInfo>();
350 }
351
352 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
353 {
354     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
355     Node* node = info.node();
356     DataFormat registerFormat = info.registerFormat();
357     ASSERT(registerFormat != DataFormatNone);
358     ASSERT(registerFormat != DataFormatDouble);
359         
360     SilentSpillAction spillAction;
361     SilentFillAction fillAction;
362         
363     if (!info.needsSpill())
364         spillAction = DoNothingForSpill;
365     else {
366 #if USE(JSVALUE64)
367         ASSERT(info.gpr() == source);
368         if (registerFormat == DataFormatInt32)
369             spillAction = Store32Payload;
370         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
371             spillAction = StorePtr;
372         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
373             spillAction = Store64;
374         else {
375             ASSERT(registerFormat & DataFormatJS);
376             spillAction = Store64;
377         }
378 #elif USE(JSVALUE32_64)
379         if (registerFormat & DataFormatJS) {
380             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
381             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
382         } else {
383             ASSERT(info.gpr() == source);
384             spillAction = Store32Payload;
385         }
386 #endif
387     }
388         
389     if (registerFormat == DataFormatInt32) {
390         ASSERT(info.gpr() == source);
391         ASSERT(isJSInt32(info.registerFormat()));
392         if (node->hasConstant()) {
393             ASSERT(node->isInt32Constant());
394             fillAction = SetInt32Constant;
395         } else
396             fillAction = Load32Payload;
397     } else if (registerFormat == DataFormatBoolean) {
398 #if USE(JSVALUE64)
399         RELEASE_ASSERT_NOT_REACHED();
400 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
401         fillAction = DoNothingForFill;
402 #endif
403 #elif USE(JSVALUE32_64)
404         ASSERT(info.gpr() == source);
405         if (node->hasConstant()) {
406             ASSERT(node->isBooleanConstant());
407             fillAction = SetBooleanConstant;
408         } else
409             fillAction = Load32Payload;
410 #endif
411     } else if (registerFormat == DataFormatCell) {
412         ASSERT(info.gpr() == source);
413         if (node->hasConstant()) {
414             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
415             node->asCell(); // To get the assertion.
416             fillAction = SetCellConstant;
417         } else {
418 #if USE(JSVALUE64)
419             fillAction = LoadPtr;
420 #else
421             fillAction = Load32Payload;
422 #endif
423         }
424     } else if (registerFormat == DataFormatStorage) {
425         ASSERT(info.gpr() == source);
426         fillAction = LoadPtr;
427     } else if (registerFormat == DataFormatInt52) {
428         if (node->hasConstant())
429             fillAction = SetInt52Constant;
430         else if (info.spillFormat() == DataFormatInt52)
431             fillAction = Load64;
432         else if (info.spillFormat() == DataFormatStrictInt52)
433             fillAction = Load64ShiftInt52Left;
434         else if (info.spillFormat() == DataFormatNone)
435             fillAction = Load64;
436         else {
437             RELEASE_ASSERT_NOT_REACHED();
438 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
439             fillAction = Load64; // Make GCC happy.
440 #endif
441         }
442     } else if (registerFormat == DataFormatStrictInt52) {
443         if (node->hasConstant())
444             fillAction = SetStrictInt52Constant;
445         else if (info.spillFormat() == DataFormatInt52)
446             fillAction = Load64ShiftInt52Right;
447         else if (info.spillFormat() == DataFormatStrictInt52)
448             fillAction = Load64;
449         else if (info.spillFormat() == DataFormatNone)
450             fillAction = Load64;
451         else {
452             RELEASE_ASSERT_NOT_REACHED();
453 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
454             fillAction = Load64; // Make GCC happy.
455 #endif
456         }
457     } else {
458         ASSERT(registerFormat & DataFormatJS);
459 #if USE(JSVALUE64)
460         ASSERT(info.gpr() == source);
461         if (node->hasConstant()) {
462             if (node->isCellConstant())
463                 fillAction = SetTrustedJSConstant;
464             else
465                 fillAction = SetJSConstant;
466         } else if (info.spillFormat() == DataFormatInt32) {
467             ASSERT(registerFormat == DataFormatJSInt32);
468             fillAction = Load32PayloadBoxInt;
469         } else
470             fillAction = Load64;
471 #else
472         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
473         if (node->hasConstant())
474             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
475         else if (info.payloadGPR() == source)
476             fillAction = Load32Payload;
477         else { // Fill the Tag
478             switch (info.spillFormat()) {
479             case DataFormatInt32:
480                 ASSERT(registerFormat == DataFormatJSInt32);
481                 fillAction = SetInt32Tag;
482                 break;
483             case DataFormatCell:
484                 ASSERT(registerFormat == DataFormatJSCell);
485                 fillAction = SetCellTag;
486                 break;
487             case DataFormatBoolean:
488                 ASSERT(registerFormat == DataFormatJSBoolean);
489                 fillAction = SetBooleanTag;
490                 break;
491             default:
492                 fillAction = Load32Tag;
493                 break;
494             }
495         }
496 #endif
497     }
498         
499     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
500 }
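// A rough usage sketch of these plans (operationFoo is a placeholder, not a real
// operation; the silent* helpers live in SpeculativeJIT.h):
//
//     silentSpillAllRegisters(resultGPR);      // build + run a plan per live register
//     callOperation(operationFoo, resultGPR, argGPR);
//     silentFillAllRegisters(resultGPR);       // replay the recorded fill actions
//
// Constants are never spilled; their fill actions simply rematerialize the value
// (SetInt32Constant, SetCellConstant, ...), which is why each plan records the node.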
501     
502 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
503 {
504     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
505     Node* node = info.node();
506     ASSERT(info.registerFormat() == DataFormatDouble);
507
508     SilentSpillAction spillAction;
509     SilentFillAction fillAction;
510         
511     if (!info.needsSpill())
512         spillAction = DoNothingForSpill;
513     else {
514         ASSERT(!node->hasConstant());
515         ASSERT(info.spillFormat() == DataFormatNone);
516         ASSERT(info.fpr() == source);
517         spillAction = StoreDouble;
518     }
519         
520 #if USE(JSVALUE64)
521     if (node->hasConstant()) {
522         node->asNumber(); // To get the assertion.
523         fillAction = SetDoubleConstant;
524     } else {
525         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
526         fillAction = LoadDouble;
527     }
528 #elif USE(JSVALUE32_64)
529     ASSERT(info.registerFormat() == DataFormatDouble);
530     if (node->hasConstant()) {
531         node->asNumber(); // To get the assertion.
532         fillAction = SetDoubleConstant;
533     } else
534         fillAction = LoadDouble;
535 #endif
536
537     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
538 }
539     
540 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
541 {
542     switch (plan.spillAction()) {
543     case DoNothingForSpill:
544         break;
545     case Store32Tag:
546         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
547         break;
548     case Store32Payload:
549         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
550         break;
551     case StorePtr:
552         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
553         break;
554 #if USE(JSVALUE64)
555     case Store64:
556         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
557         break;
558 #endif
559     case StoreDouble:
560         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
561         break;
562     default:
563         RELEASE_ASSERT_NOT_REACHED();
564     }
565 }
566     
567 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
568 {
569 #if USE(JSVALUE32_64)
570     UNUSED_PARAM(canTrample);
571 #endif
572     switch (plan.fillAction()) {
573     case DoNothingForFill:
574         break;
575     case SetInt32Constant:
576         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
577         break;
578 #if USE(JSVALUE64)
579     case SetInt52Constant:
580         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
581         break;
582     case SetStrictInt52Constant:
583         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
584         break;
585 #endif // USE(JSVALUE64)
586     case SetBooleanConstant:
587         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
588         break;
589     case SetCellConstant:
590         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
591         break;
592 #if USE(JSVALUE64)
593     case SetTrustedJSConstant:
594         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
595         break;
596     case SetJSConstant:
597         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
598         break;
599     case SetDoubleConstant:
600         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
601         m_jit.move64ToDouble(canTrample, plan.fpr());
602         break;
603     case Load32PayloadBoxInt:
604         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
605         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
606         break;
607     case Load32PayloadConvertToInt52:
608         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
609         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
610         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
611         break;
612     case Load32PayloadSignExtend:
613         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
614         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
615         break;
616 #else
617     case SetJSConstantTag:
618         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
619         break;
620     case SetJSConstantPayload:
621         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
622         break;
623     case SetInt32Tag:
624         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
625         break;
626     case SetCellTag:
627         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
628         break;
629     case SetBooleanTag:
630         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
631         break;
632     case SetDoubleConstant:
633         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
634         break;
635 #endif
636     case Load32Tag:
637         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
638         break;
639     case Load32Payload:
640         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
641         break;
642     case LoadPtr:
643         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
644         break;
645 #if USE(JSVALUE64)
646     case Load64:
647         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
648         break;
649     case Load64ShiftInt52Right:
650         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
651         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
652         break;
653     case Load64ShiftInt52Left:
654         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
655         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
656         break;
657 #endif
658     case LoadDouble:
659         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
660         break;
661     default:
662         RELEASE_ASSERT_NOT_REACHED();
663     }
664 }
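// Note: canTrample is a scratch GPR the caller allows this function to clobber.
// On JSVALUE64 it is needed to rematerialize a double constant (move the raw bits
// into the GPR, then move64ToDouble into the target FPR); on JSVALUE32_64 it is
// unused because double constants are reloaded via addressOfDoubleConstant.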
665     
666 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
667 {
668     switch (arrayMode.arrayClass()) {
669     case Array::OriginalArray: {
670         CRASH();
671 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
672         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
673         return result;
674 #endif
675     }
676         
677     case Array::Array:
678         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
679         return m_jit.branch32(
680             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
681         
682     case Array::NonArray:
683     case Array::OriginalNonArray:
684         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
685         return m_jit.branch32(
686             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
687         
688     case Array::PossiblyArray:
689         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
690         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
691     }
692     
693     RELEASE_ASSERT_NOT_REACHED();
694     return JITCompiler::Jump();
695 }
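// Worked example of the checks above, derived from the IndexingType bits: for
// an Array::Array mode with Int32Shape the emitted test is
//
//     temp &= (IsArray | IndexingShapeMask);
//     branch if temp != (IsArray | Int32Shape);
//
// while Array::PossiblyArray masks with IndexingShapeMask only, accepting both
// array and non-array objects that have the wanted shape.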
696
697 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
698 {
699     JITCompiler::JumpList result;
700     
701     switch (arrayMode.type()) {
702     case Array::Int32:
703         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
704
705     case Array::Double:
706         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
707
708     case Array::Contiguous:
709         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
710
711     case Array::Undecided:
712         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
713
714     case Array::ArrayStorage:
715     case Array::SlowPutArrayStorage: {
716         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
717         
718         if (arrayMode.isJSArray()) {
719             if (arrayMode.isSlowPut()) {
720                 result.append(
721                     m_jit.branchTest32(
722                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
723                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
724                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
725                 result.append(
726                     m_jit.branch32(
727                         MacroAssembler::Above, tempGPR,
728                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
729                 break;
730             }
731             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
732             result.append(
733                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
734             break;
735         }
736         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
737         if (arrayMode.isSlowPut()) {
738             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
739             result.append(
740                 m_jit.branch32(
741                     MacroAssembler::Above, tempGPR,
742                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
743             break;
744         }
745         result.append(
746             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
747         break;
748     }
749     default:
750         CRASH();
751         break;
752     }
753     
754     return result;
755 }
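// Note: the slow-put cases above use an unsigned range check. Subtracting
// ArrayStorageShape and branching on Above folds the two-sided test
// ArrayStorageShape <= shape <= SlowPutArrayStorageShape into a single compare,
// because any shape below ArrayStorageShape wraps around to a large unsigned value.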
756
757 void SpeculativeJIT::checkArray(Node* node)
758 {
759     ASSERT(node->arrayMode().isSpecific());
760     ASSERT(!node->arrayMode().doesConversion());
761     
762     SpeculateCellOperand base(this, node->child1());
763     GPRReg baseReg = base.gpr();
764     
765     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
766         noResult(m_currentNode);
767         return;
768     }
769     
770     const ClassInfo* expectedClassInfo = 0;
771     
772     switch (node->arrayMode().type()) {
773     case Array::AnyTypedArray:
774     case Array::String:
775         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
776         break;
777     case Array::Int32:
778     case Array::Double:
779     case Array::Contiguous:
780     case Array::Undecided:
781     case Array::ArrayStorage:
782     case Array::SlowPutArrayStorage: {
783         GPRTemporary temp(this);
784         GPRReg tempGPR = temp.gpr();
785         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
786         speculationCheck(
787             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
788             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
789         
790         noResult(m_currentNode);
791         return;
792     }
793     case Array::DirectArguments:
794         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
795         noResult(m_currentNode);
796         return;
797     case Array::ScopedArguments:
798         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
799         noResult(m_currentNode);
800         return;
801     default:
802         speculateCellTypeWithoutTypeFiltering(
803             node->child1(), baseReg,
804             typeForTypedArrayType(node->arrayMode().typedArrayType()));
805         noResult(m_currentNode);
806         return;
807     }
808     
809     RELEASE_ASSERT(expectedClassInfo);
810     
811     GPRTemporary temp(this);
812     GPRTemporary temp2(this);
813     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
814     speculationCheck(
815         BadType, JSValueSource::unboxedCell(baseReg), node,
816         m_jit.branchPtr(
817             MacroAssembler::NotEqual,
818             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
819             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
820     
821     noResult(m_currentNode);
822 }
823
824 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
825 {
826     ASSERT(node->arrayMode().doesConversion());
827     
828     GPRTemporary temp(this);
829     GPRTemporary structure;
830     GPRReg tempGPR = temp.gpr();
831     GPRReg structureGPR = InvalidGPRReg;
832     
833     if (node->op() != ArrayifyToStructure) {
834         GPRTemporary realStructure(this);
835         structure.adopt(realStructure);
836         structureGPR = structure.gpr();
837     }
838         
839     // We can skip all that comes next if we already have array storage.
840     MacroAssembler::JumpList slowPath;
841     
842     if (node->op() == ArrayifyToStructure) {
843         slowPath.append(m_jit.branchWeakStructure(
844             JITCompiler::NotEqual,
845             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
846             node->structure()));
847     } else {
848         m_jit.load8(
849             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
850         
851         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
852     }
853     
854     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
855         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
856     
857     noResult(m_currentNode);
858 }
859
860 void SpeculativeJIT::arrayify(Node* node)
861 {
862     ASSERT(node->arrayMode().isSpecific());
863     
864     SpeculateCellOperand base(this, node->child1());
865     
866     if (!node->child2()) {
867         arrayify(node, base.gpr(), InvalidGPRReg);
868         return;
869     }
870     
871     SpeculateInt32Operand property(this, node->child2());
872     
873     arrayify(node, base.gpr(), property.gpr());
874 }
875
876 GPRReg SpeculativeJIT::fillStorage(Edge edge)
877 {
878     VirtualRegister virtualRegister = edge->virtualRegister();
879     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
880     
881     switch (info.registerFormat()) {
882     case DataFormatNone: {
883         if (info.spillFormat() == DataFormatStorage) {
884             GPRReg gpr = allocate();
885             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
886             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
887             info.fillStorage(*m_stream, gpr);
888             return gpr;
889         }
890         
891         // Must be a cell; fill it as a cell and then return the pointer.
892         return fillSpeculateCell(edge);
893     }
894         
895     case DataFormatStorage: {
896         GPRReg gpr = info.gpr();
897         m_gprs.lock(gpr);
898         return gpr;
899     }
900         
901     default:
902         return fillSpeculateCell(edge);
903     }
904 }
905
906 void SpeculativeJIT::useChildren(Node* node)
907 {
908     if (node->flags() & NodeHasVarArgs) {
909         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
910             if (!!m_jit.graph().m_varArgChildren[childIdx])
911                 use(m_jit.graph().m_varArgChildren[childIdx]);
912         }
913     } else {
914         Edge child1 = node->child1();
915         if (!child1) {
916             ASSERT(!node->child2() && !node->child3());
917             return;
918         }
919         use(child1);
920         
921         Edge child2 = node->child2();
922         if (!child2) {
923             ASSERT(!node->child3());
924             return;
925         }
926         use(child2);
927         
928         Edge child3 = node->child3();
929         if (!child3)
930             return;
931         use(child3);
932     }
933 }
934
935 void SpeculativeJIT::compileIn(Node* node)
936 {
937     SpeculateCellOperand base(this, node->child2());
938     GPRReg baseGPR = base.gpr();
939     
940     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
941         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
942             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
943             
944             GPRTemporary result(this);
945             GPRReg resultGPR = result.gpr();
946
947             use(node->child1());
948             
949             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
950             MacroAssembler::Label done = m_jit.label();
951             
952             // Since this block is executed only when string->tryGetValueImpl() returns an atomic
953             // StringImpl, we can safely cast it to const AtomicStringImpl*.
954             auto slowPath = slowPathCall(
955                 jump.m_jump, this, operationInOptimize,
956                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
957                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
958             
959             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
960             stubInfo->codeOrigin = node->origin.semantic;
961             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
962             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
963 #if USE(JSVALUE32_64)
964             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
965             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
966 #endif
967             stubInfo->patch.usedRegisters = usedRegisters();
968
969             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
970             addSlowPathGenerator(WTF::move(slowPath));
971
972             base.use();
973
974             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
975             return;
976         }
977     }
978
979     JSValueOperand key(this, node->child1());
980     JSValueRegs regs = key.jsValueRegs();
981         
982     GPRFlushedCallResult result(this);
983     GPRReg resultGPR = result.gpr();
984         
985     base.use();
986     key.use();
987         
988     flushRegisters();
989     callOperation(
990         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
991         baseGPR, regs);
992     m_jit.exceptionCheck();
993     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
994 }
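// Note on the fast path above: when the property is a constant atomic string,
// compileIn plants a patchable jump plus a StructureStubInfo so the IC repatching
// machinery can later inline-cache the "in" check; until then the slow path calls
// operationInOptimize. Other keys fall back to operationGenericIn unconditionally.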
995
996 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
997 {
998     unsigned branchIndexInBlock = detectPeepHoleBranch();
999     if (branchIndexInBlock != UINT_MAX) {
1000         Node* branchNode = m_block->at(branchIndexInBlock);
1001
1002         ASSERT(node->adjustedRefCount() == 1);
1003         
1004         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1005     
1006         m_indexInBlock = branchIndexInBlock;
1007         m_currentNode = branchNode;
1008         
1009         return true;
1010     }
1011     
1012     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1013     
1014     return false;
1015 }
1016
1017 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1018 {
1019     unsigned branchIndexInBlock = detectPeepHoleBranch();
1020     if (branchIndexInBlock != UINT_MAX) {
1021         Node* branchNode = m_block->at(branchIndexInBlock);
1022
1023         ASSERT(node->adjustedRefCount() == 1);
1024         
1025         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1026     
1027         m_indexInBlock = branchIndexInBlock;
1028         m_currentNode = branchNode;
1029         
1030         return true;
1031     }
1032     
1033     nonSpeculativeNonPeepholeStrictEq(node, invert);
1034     
1035     return false;
1036 }
1037
1038 static const char* dataFormatString(DataFormat format)
1039 {
1040     // These values correspond to the DataFormat enum.
1041     const char* strings[] = {
1042         "[  ]",
1043         "[ i]",
1044         "[ d]",
1045         "[ c]",
1046         "Err!",
1047         "Err!",
1048         "Err!",
1049         "Err!",
1050         "[J ]",
1051         "[Ji]",
1052         "[Jd]",
1053         "[Jc]",
1054         "Err!",
1055         "Err!",
1056         "Err!",
1057         "Err!",
1058     };
1059     return strings[format];
1060 }
1061
1062 void SpeculativeJIT::dump(const char* label)
1063 {
1064     if (label)
1065         dataLogF("<%s>\n", label);
1066
1067     dataLogF("  gprs:\n");
1068     m_gprs.dump();
1069     dataLogF("  fprs:\n");
1070     m_fprs.dump();
1071     dataLogF("  VirtualRegisters:\n");
1072     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1073         GenerationInfo& info = m_generationInfo[i];
1074         if (info.alive())
1075             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1076         else
1077             dataLogF("    % 3d:[__][__]", i);
1078         if (info.registerFormat() == DataFormatDouble)
1079             dataLogF(":fpr%d\n", info.fpr());
1080         else if (info.registerFormat() != DataFormatNone
1081 #if USE(JSVALUE32_64)
1082             && !(info.registerFormat() & DataFormatJS)
1083 #endif
1084             ) {
1085             ASSERT(info.gpr() != InvalidGPRReg);
1086             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1087         } else
1088             dataLogF("\n");
1089     }
1090     if (label)
1091         dataLogF("</%s>\n", label);
1092 }
1093
1094 GPRTemporary::GPRTemporary()
1095     : m_jit(0)
1096     , m_gpr(InvalidGPRReg)
1097 {
1098 }
1099
1100 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1101     : m_jit(jit)
1102     , m_gpr(InvalidGPRReg)
1103 {
1104     m_gpr = m_jit->allocate();
1105 }
1106
1107 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1108     : m_jit(jit)
1109     , m_gpr(InvalidGPRReg)
1110 {
1111     m_gpr = m_jit->allocate(specific);
1112 }
1113
1114 #if USE(JSVALUE32_64)
1115 GPRTemporary::GPRTemporary(
1116     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1117     : m_jit(jit)
1118     , m_gpr(InvalidGPRReg)
1119 {
1120     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1121         m_gpr = m_jit->reuse(op1.gpr(which));
1122     else
1123         m_gpr = m_jit->allocate();
1124 }
1125 #endif // USE(JSVALUE32_64)
1126
1127 JSValueRegsTemporary::JSValueRegsTemporary() { }
1128
1129 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1130 #if USE(JSVALUE64)
1131     : m_gpr(jit)
1132 #else
1133     : m_payloadGPR(jit)
1134     , m_tagGPR(jit)
1135 #endif
1136 {
1137 }
1138
1139 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1140
1141 JSValueRegs JSValueRegsTemporary::regs()
1142 {
1143 #if USE(JSVALUE64)
1144     return JSValueRegs(m_gpr.gpr());
1145 #else
1146     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1147 #endif
1148 }
1149
1150 void GPRTemporary::adopt(GPRTemporary& other)
1151 {
1152     ASSERT(!m_jit);
1153     ASSERT(m_gpr == InvalidGPRReg);
1154     ASSERT(other.m_jit);
1155     ASSERT(other.m_gpr != InvalidGPRReg);
1156     m_jit = other.m_jit;
1157     m_gpr = other.m_gpr;
1158     other.m_jit = 0;
1159     other.m_gpr = InvalidGPRReg;
1160 }
1161
1162 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1163     : m_jit(jit)
1164     , m_fpr(InvalidFPRReg)
1165 {
1166     m_fpr = m_jit->fprAllocate();
1167 }
1168
1169 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1170     : m_jit(jit)
1171     , m_fpr(InvalidFPRReg)
1172 {
1173     if (m_jit->canReuse(op1.node()))
1174         m_fpr = m_jit->reuse(op1.fpr());
1175     else
1176         m_fpr = m_jit->fprAllocate();
1177 }
1178
1179 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1180     : m_jit(jit)
1181     , m_fpr(InvalidFPRReg)
1182 {
1183     if (m_jit->canReuse(op1.node()))
1184         m_fpr = m_jit->reuse(op1.fpr());
1185     else if (m_jit->canReuse(op2.node()))
1186         m_fpr = m_jit->reuse(op2.fpr());
1187     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1188         m_fpr = m_jit->reuse(op1.fpr());
1189     else
1190         m_fpr = m_jit->fprAllocate();
1191 }
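// Note: the reuse() paths in these temporaries let the new temporary alias an
// operand's register when canReuse() reports the operand's value is not needed
// after this node, saving an allocation; otherwise a fresh register is taken
// from the bank.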
1192
1193 #if USE(JSVALUE32_64)
1194 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1195     : m_jit(jit)
1196     , m_fpr(InvalidFPRReg)
1197 {
1198     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1199         m_fpr = m_jit->reuse(op1.fpr());
1200     else
1201         m_fpr = m_jit->fprAllocate();
1202 }
1203 #endif
1204
1205 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1206 {
1207     BasicBlock* taken = branchNode->branchData()->taken.block;
1208     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1209     
1210     SpeculateDoubleOperand op1(this, node->child1());
1211     SpeculateDoubleOperand op2(this, node->child2());
1212     
1213     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1214     jump(notTaken);
1215 }
1216
1217 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1218 {
1219     BasicBlock* taken = branchNode->branchData()->taken.block;
1220     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1221
1222     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1223     
1224     if (taken == nextBlock()) {
1225         condition = MacroAssembler::NotEqual;
1226         BasicBlock* tmp = taken;
1227         taken = notTaken;
1228         notTaken = tmp;
1229     }
1230
1231     SpeculateCellOperand op1(this, node->child1());
1232     SpeculateCellOperand op2(this, node->child2());
1233     
1234     GPRReg op1GPR = op1.gpr();
1235     GPRReg op2GPR = op2.gpr();
1236     
1237     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1238         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1239             speculationCheck(
1240                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1241         }
1242         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1243             speculationCheck(
1244                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1245         }
1246     } else {
1247         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1248             speculationCheck(
1249                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1250                 m_jit.branchIfNotObject(op1GPR));
1251         }
1252         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1253             m_jit.branchTest8(
1254                 MacroAssembler::NonZero, 
1255                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1256                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1257
1258         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1259             speculationCheck(
1260                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1261                 m_jit.branchIfNotObject(op2GPR));
1262         }
1263         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1264             m_jit.branchTest8(
1265                 MacroAssembler::NonZero, 
1266                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1267                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1268     }
1269
1270     branchPtr(condition, op1GPR, op2GPR, taken);
1271     jump(notTaken);
1272 }
1273
1274 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1275 {
1276     BasicBlock* taken = branchNode->branchData()->taken.block;
1277     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1278
1279     // The branch instruction will branch to the taken block.
1280     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1281     if (taken == nextBlock()) {
1282         condition = JITCompiler::invert(condition);
1283         BasicBlock* tmp = taken;
1284         taken = notTaken;
1285         notTaken = tmp;
1286     }
1287
1288     if (node->child1()->isBooleanConstant()) {
1289         bool imm = node->child1()->asBoolean();
1290         SpeculateBooleanOperand op2(this, node->child2());
1291         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1292     } else if (node->child2()->isBooleanConstant()) {
1293         SpeculateBooleanOperand op1(this, node->child1());
1294         bool imm = node->child2()->asBoolean();
1295         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1296     } else {
1297         SpeculateBooleanOperand op1(this, node->child1());
1298         SpeculateBooleanOperand op2(this, node->child2());
1299         branch32(condition, op1.gpr(), op2.gpr(), taken);
1300     }
1301
1302     jump(notTaken);
1303 }
1304
1305 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1306 {
1307     BasicBlock* taken = branchNode->branchData()->taken.block;
1308     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1309
1310     // The branch instruction will branch to the taken block.
1311     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1312     if (taken == nextBlock()) {
1313         condition = JITCompiler::invert(condition);
1314         BasicBlock* tmp = taken;
1315         taken = notTaken;
1316         notTaken = tmp;
1317     }
1318
1319     if (node->child1()->isInt32Constant()) {
1320         int32_t imm = node->child1()->asInt32();
1321         SpeculateInt32Operand op2(this, node->child2());
1322         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1323     } else if (node->child2()->isInt32Constant()) {
1324         SpeculateInt32Operand op1(this, node->child1());
1325         int32_t imm = node->child2()->asInt32();
1326         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1327     } else {
1328         SpeculateInt32Operand op1(this, node->child1());
1329         SpeculateInt32Operand op2(this, node->child2());
1330         branch32(condition, op1.gpr(), op2.gpr(), taken);
1331     }
1332
1333     jump(notTaken);
1334 }
1335
1336 // Returns true if the compare is fused with a subsequent branch.
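// For instance (an illustrative DFG shape, not taken from a real graph), in
//
//     @a: CompareLess(Int32:@x, Int32:@y)
//     @b: Branch(Bool:@a, #taken, #notTaken)
//
// the compare result is never materialized as a boolean; compilePeepHoleBranch
// emits a single branch32 and then advances straight to the Branch node.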
1337 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1338 {
1339     // Fused compare & branch.
1340     unsigned branchIndexInBlock = detectPeepHoleBranch();
1341     if (branchIndexInBlock != UINT_MAX) {
1342         Node* branchNode = m_block->at(branchIndexInBlock);
1343
1344         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1345         // so there can be no intervening nodes that also reference the compare.
1346         ASSERT(node->adjustedRefCount() == 1);
1347
1348         if (node->isBinaryUseKind(Int32Use))
1349             compilePeepHoleInt32Branch(node, branchNode, condition);
1350 #if USE(JSVALUE64)
1351         else if (node->isBinaryUseKind(Int52RepUse))
1352             compilePeepHoleInt52Branch(node, branchNode, condition);
1353 #endif // USE(JSVALUE64)
1354         else if (node->isBinaryUseKind(DoubleRepUse))
1355             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1356         else if (node->op() == CompareEq) {
1357             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1358                 // Use non-peephole comparison, for now.
1359                 return false;
1360             }
1361             if (node->isBinaryUseKind(BooleanUse))
1362                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1363             else if (node->isBinaryUseKind(SymbolUse))
1364                 compilePeepHoleSymbolEquality(node, branchNode);
1365             else if (node->isBinaryUseKind(ObjectUse))
1366                 compilePeepHoleObjectEquality(node, branchNode);
1367             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1368                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1369             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1370                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1371             else if (!needsTypeCheck(node->child1(), SpecOther))
1372                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1373             else if (!needsTypeCheck(node->child2(), SpecOther))
1374                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1375             else {
1376                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1377                 return true;
1378             }
1379         } else {
1380             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1381             return true;
1382         }
1383
1384         use(node->child1());
1385         use(node->child2());
1386         m_indexInBlock = branchIndexInBlock;
1387         m_currentNode = branchNode;
1388         return true;
1389     }
1390     return false;
1391 }
1392
1393 void SpeculativeJIT::noticeOSRBirth(Node* node)
1394 {
1395     if (!node->hasVirtualRegister())
1396         return;
1397     
1398     VirtualRegister virtualRegister = node->virtualRegister();
1399     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1400     
1401     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1402 }
1403
1404 void SpeculativeJIT::compileMovHint(Node* node)
1405 {
1406     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1407     
1408     Node* child = node->child1().node();
1409     noticeOSRBirth(child);
1410     
1411     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1412 }
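// Note: a MovHint emits no code of its own; compileMovHint just records a
// VariableEvent so the OSR exit machinery knows which node's value corresponds
// to which bytecode variable if an exit is taken while the hint is live.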
1413
1414 void SpeculativeJIT::bail(AbortReason reason)
1415 {
1416     if (verboseCompilationEnabled())
1417         dataLog("Bailing compilation.\n");
1418     m_compileOkay = true;
1419     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1420     clearGenerationInfo();
1421 }
1422
1423 void SpeculativeJIT::compileCurrentBlock()
1424 {
1425     ASSERT(m_compileOkay);
1426     
1427     if (!m_block)
1428         return;
1429     
1430     ASSERT(m_block->isReachable);
1431     
1432     m_jit.blockHeads()[m_block->index] = m_jit.label();
1433
1434     if (!m_block->intersectionOfCFAHasVisited) {
1435         // Don't generate code for basic blocks that are unreachable according to CFA.
1436         // But to be sure that nobody has generated a jump to this block, drop in a
1437         // breakpoint here.
1438         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1439         return;
1440     }
1441
1442     m_stream->appendAndLog(VariableEvent::reset());
1443     
1444     m_jit.jitAssertHasValidCallFrame();
1445     m_jit.jitAssertTagsInPlace();
1446     m_jit.jitAssertArgumentCountSane();
1447
1448     m_state.reset();
1449     m_state.beginBasicBlock(m_block);
1450     
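         // Record the location and format of every live local at the head of the block in the variable
         // event stream, so that OSR exit can later reconstruct the bytecode state from it.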
1451     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1452         int operand = m_block->variablesAtHead.operandForIndex(i);
1453         Node* node = m_block->variablesAtHead[i];
1454         if (!node)
1455             continue; // No need to record dead SetLocals.
1456         
1457         VariableAccessData* variable = node->variableAccessData();
1458         DataFormat format;
1459         if (!node->refCount())
1460             continue; // No need to record dead SetLocals.
1461         format = dataFormatFor(variable->flushFormat());
1462         m_stream->appendAndLog(
1463             VariableEvent::setLocal(
1464                 VirtualRegister(operand),
1465                 variable->machineLocal(),
1466                 format));
1467     }
1468
1469     m_origin = NodeOrigin();
1470     
1471     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1472         m_currentNode = m_block->at(m_indexInBlock);
1473         
1474         // We may have hit a contradiction that the CFA was aware of but that the JIT
1475         // didn't cause directly.
1476         if (!m_state.isValid()) {
1477             bail(DFGBailedAtTopOfBlock);
1478             return;
1479         }
1480
1481         m_interpreter.startExecuting();
1482         m_jit.setForNode(m_currentNode);
1483         m_origin = m_currentNode->origin;
1484         if (validationEnabled())
1485             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1486         m_lastGeneratedNode = m_currentNode->op();
1487         
1488         ASSERT(m_currentNode->shouldGenerate());
1489         
1490         if (verboseCompilationEnabled()) {
1491             dataLogF(
1492                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1493                 (int)m_currentNode->index(),
1494                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1495             dataLog("\n");
1496         }
1497         
1498         m_jit.jitAssertNoException();
1499
1500         compile(m_currentNode);
1501         
1502         if (belongsInMinifiedGraph(m_currentNode->op()))
1503             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1504         
1505 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1506         m_jit.clearRegisterAllocationOffsets();
1507 #endif
1508         
1509         if (!m_compileOkay) {
1510             bail(DFGBailedAtEndOfNode);
1511             return;
1512         }
1513         
1514         // Make sure that the abstract state is rematerialized for the next node.
1515         m_interpreter.executeEffects(m_indexInBlock);
1516     }
1517     
1518     // Perform the most basic verification that children have been used correctly.
1519     if (!ASSERT_DISABLED) {
1520         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1521             GenerationInfo& info = m_generationInfo[index];
1522             RELEASE_ASSERT(!info.alive());
1523         }
1524     }
1525 }
1526
1527 // If we are making type predictions about our arguments then
1528 // we need to check that they are correct on function entry.
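     // For every argument flushed in a format more specific than JSValue, emit a cheap tag check
     // against the value already sitting in the argument's stack slot, and OSR exit with a BadType
     // speculation failure if the check does not hold.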
1529 void SpeculativeJIT::checkArgumentTypes()
1530 {
1531     ASSERT(!m_currentNode);
1532     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1533
1534     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1535         Node* node = m_jit.graph().m_arguments[i];
1536         if (!node) {
1537             // The argument is dead. We don't do any checks for such arguments.
1538             continue;
1539         }
1540         
1541         ASSERT(node->op() == SetArgument);
1542         ASSERT(node->shouldGenerate());
1543
1544         VariableAccessData* variableAccessData = node->variableAccessData();
1545         FlushFormat format = variableAccessData->flushFormat();
1546         
1547         if (format == FlushedJSValue)
1548             continue;
1549         
1550         VirtualRegister virtualRegister = variableAccessData->local();
1551
1552         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1553         
1554 #if USE(JSVALUE64)
1555         switch (format) {
1556         case FlushedInt32: {
1557             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1558             break;
1559         }
1560         case FlushedBoolean: {
1561             GPRTemporary temp(this);
1562             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1563             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
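                 // Xoring with ValueFalse maps false to 0 and true to 1; any other JSValue encoding
                 // leaves bits set outside the low bit, which the ~1 test below catches.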
1564             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1565             break;
1566         }
1567         case FlushedCell: {
1568             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1569             break;
1570         }
1571         default:
1572             RELEASE_ASSERT_NOT_REACHED();
1573             break;
1574         }
1575 #else
1576         switch (format) {
1577         case FlushedInt32: {
1578             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1579             break;
1580         }
1581         case FlushedBoolean: {
1582             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1583             break;
1584         }
1585         case FlushedCell: {
1586             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1587             break;
1588         }
1589         default:
1590             RELEASE_ASSERT_NOT_REACHED();
1591             break;
1592         }
1593 #endif
1594     }
1595
1596     m_origin = NodeOrigin();
1597 }
1598
1599 bool SpeculativeJIT::compile()
1600 {
1601     checkArgumentTypes();
1602     
1603     ASSERT(!m_currentNode);
1604     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1605         m_jit.setForBlockIndex(blockIndex);
1606         m_block = m_jit.graph().block(blockIndex);
1607         compileCurrentBlock();
1608     }
1609     linkBranches();
1610     return true;
1611 }
1612
1613 void SpeculativeJIT::createOSREntries()
1614 {
1615     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1616         BasicBlock* block = m_jit.graph().block(blockIndex);
1617         if (!block)
1618             continue;
1619         if (!block->isOSRTarget)
1620             continue;
1621         
1622         // Currently we don't have OSR entry trampolines. We could add them
1623         // here if need be.
1624         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1625     }
1626 }
1627
1628 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1629 {
1630     unsigned osrEntryIndex = 0;
1631     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1632         BasicBlock* block = m_jit.graph().block(blockIndex);
1633         if (!block)
1634             continue;
1635         if (!block->isOSRTarget)
1636             continue;
1637         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1638     }
1639     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1640     
1641     if (verboseCompilationEnabled()) {
1642         DumpContext dumpContext;
1643         dataLog("OSR Entries:\n");
1644         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1645             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1646         if (!dumpContext.isEmpty())
1647             dumpContext.dump(WTF::dataFile());
1648     }
1649 }
1650
1651 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1652 {
1653     Edge child3 = m_jit.graph().varArgChild(node, 2);
1654     Edge child4 = m_jit.graph().varArgChild(node, 3);
1655
1656     ArrayMode arrayMode = node->arrayMode();
1657     
1658     GPRReg baseReg = base.gpr();
1659     GPRReg propertyReg = property.gpr();
1660     
1661     SpeculateDoubleOperand value(this, child3);
1662
1663     FPRReg valueReg = value.fpr();
1664     
1665     DFG_TYPE_CHECK(
1666         JSValueRegs(), child3, SpecFullRealNumber,
1667         m_jit.branchDouble(
1668             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1669     
1670     if (!m_compileOkay)
1671         return;
1672     
1673     StorageOperand storage(this, child4);
1674     GPRReg storageReg = storage.gpr();
1675
1676     if (node->op() == PutByValAlias) {
1677         // Store the value to the array.
1678         GPRReg propertyReg = property.gpr();
1679         FPRReg valueReg = value.fpr();
1680         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1681         
1682         noResult(m_currentNode);
1683         return;
1684     }
1685     
1686     GPRTemporary temporary;
1687     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1688
1689     MacroAssembler::Jump slowCase;
1690     
1691     if (arrayMode.isInBounds()) {
1692         speculationCheck(
1693             OutOfBounds, JSValueRegs(), 0,
1694             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1695     } else {
1696         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1697         
1698         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1699         
1700         if (!arrayMode.isOutOfBounds())
1701             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1702         
1703         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1704         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1705         
1706         inBounds.link(&m_jit);
1707     }
1708     
1709     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1710
1711     base.use();
1712     property.use();
1713     value.use();
1714     storage.use();
1715     
1716     if (arrayMode.isOutOfBounds()) {
1717         addSlowPathGenerator(
1718             slowPathCall(
1719                 slowCase, this,
1720                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1721                 NoResult, baseReg, propertyReg, valueReg));
1722     }
1723
1724     noResult(m_currentNode, UseChildrenCalledExplicitly);
1725 }
1726
1727 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1728 {
1729     SpeculateCellOperand string(this, node->child1());
1730     SpeculateStrictInt32Operand index(this, node->child2());
1731     StorageOperand storage(this, node->child3());
1732
1733     GPRReg stringReg = string.gpr();
1734     GPRReg indexReg = index.gpr();
1735     GPRReg storageReg = storage.gpr();
1736     
1737     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1738
1739     // unsigned comparison so we can filter out negative indices and indices that are too large
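         // (For example, an index of -1 read as unsigned is 0xFFFFFFFF, which is AboveOrEqual any
         // possible string length, so this single branch rejects both underflow and overflow.)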
1740     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1741
1742     GPRTemporary scratch(this);
1743     GPRReg scratchReg = scratch.gpr();
1744
1745     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1746
1747     // Load the character into scratchReg
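         // (StringImpl stores either 8-bit LChars or 16-bit UChars; the is8Bit flag picks the load width.)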
1748     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1749
1750     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1751     JITCompiler::Jump cont8Bit = m_jit.jump();
1752
1753     is16Bit.link(&m_jit);
1754
1755     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1756
1757     cont8Bit.link(&m_jit);
1758
1759     int32Result(scratchReg, m_currentNode);
1760 }
1761
1762 void SpeculativeJIT::compileGetByValOnString(Node* node)
1763 {
1764     SpeculateCellOperand base(this, node->child1());
1765     SpeculateStrictInt32Operand property(this, node->child2());
1766     StorageOperand storage(this, node->child3());
1767     GPRReg baseReg = base.gpr();
1768     GPRReg propertyReg = property.gpr();
1769     GPRReg storageReg = storage.gpr();
1770
1771     GPRTemporary scratch(this);
1772     GPRReg scratchReg = scratch.gpr();
1773 #if USE(JSVALUE32_64)
1774     GPRTemporary resultTag;
1775     GPRReg resultTagReg = InvalidGPRReg;
1776     if (node->arrayMode().isOutOfBounds()) {
1777         GPRTemporary realResultTag(this);
1778         resultTag.adopt(realResultTag);
1779         resultTagReg = resultTag.gpr();
1780     }
1781 #endif
1782
1783     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1784
1785     // unsigned comparison so we can filter out negative indices and indices that are too large
1786     JITCompiler::Jump outOfBounds = m_jit.branch32(
1787         MacroAssembler::AboveOrEqual, propertyReg,
1788         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1789     if (node->arrayMode().isInBounds())
1790         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1791
1792     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1793
1794     // Load the character into scratchReg
1795     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1796
1797     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1798     JITCompiler::Jump cont8Bit = m_jit.jump();
1799
1800     is16Bit.link(&m_jit);
1801
1802     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1803
1804     JITCompiler::Jump bigCharacter =
1805         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1806
1807     // 8 bit string values don't need the isASCII check.
1808     cont8Bit.link(&m_jit);
1809
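         // Index into the VM's single-character string cache: scale the character code by the pointer
         // size (<<2 on 32-bit, <<3 on 64-bit) and load the interned JSString* for that character.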
1810     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1811     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1812     m_jit.loadPtr(scratchReg, scratchReg);
1813
1814     addSlowPathGenerator(
1815         slowPathCall(
1816             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1817
1818     if (node->arrayMode().isOutOfBounds()) {
1819 #if USE(JSVALUE32_64)
1820         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1821 #endif
1822
1823         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1824         if (globalObject->stringPrototypeChainIsSane()) {
1825             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1826             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1827             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1828             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1829             // indexed properties either.
1830             // https://bugs.webkit.org/show_bug.cgi?id=144668
1831             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1832             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1833             
1834 #if USE(JSVALUE64)
1835             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1836                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1837 #else
1838             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1839                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1840                 baseReg, propertyReg));
1841 #endif
1842         } else {
1843 #if USE(JSVALUE64)
1844             addSlowPathGenerator(
1845                 slowPathCall(
1846                     outOfBounds, this, operationGetByValStringInt,
1847                     scratchReg, baseReg, propertyReg));
1848 #else
1849             addSlowPathGenerator(
1850                 slowPathCall(
1851                     outOfBounds, this, operationGetByValStringInt,
1852                     resultTagReg, scratchReg, baseReg, propertyReg));
1853 #endif
1854         }
1855         
1856 #if USE(JSVALUE64)
1857         jsValueResult(scratchReg, m_currentNode);
1858 #else
1859         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1860 #endif
1861     } else
1862         cellResult(scratchReg, m_currentNode);
1863 }
1864
1865 void SpeculativeJIT::compileFromCharCode(Node* node)
1866 {
1867     SpeculateStrictInt32Operand property(this, node->child1());
1868     GPRReg propertyReg = property.gpr();
1869     GPRTemporary smallStrings(this);
1870     GPRTemporary scratch(this);
1871     GPRReg scratchReg = scratch.gpr();
1872     GPRReg smallStringsReg = smallStrings.gpr();
1873
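         // Character codes below 0xff index the VM's single-character string cache; a larger code, or
         // a null cache entry, falls back to operationStringFromCharCode on the slow path.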
1874     JITCompiler::JumpList slowCases;
1875     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1876     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1877     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1878
1879     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1880     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1881     cellResult(scratchReg, m_currentNode);
1882 }
1883
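     // Classifies how the operand is currently represented so that compileValueToInt32 can pick the
     // cheapest conversion: already an int32, a boxed JSValue that still needs inspection, or a
     // format (cell or boolean) that proves the number speculation has already failed.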
1884 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1885 {
1886     VirtualRegister virtualRegister = node->virtualRegister();
1887     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1888
1889     switch (info.registerFormat()) {
1890     case DataFormatStorage:
1891         RELEASE_ASSERT_NOT_REACHED();
1892
1893     case DataFormatBoolean:
1894     case DataFormatCell:
1895         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1896         return GeneratedOperandTypeUnknown;
1897
1898     case DataFormatNone:
1899     case DataFormatJSCell:
1900     case DataFormatJS:
1901     case DataFormatJSBoolean:
1902     case DataFormatJSDouble:
1903         return GeneratedOperandJSValue;
1904
1905     case DataFormatJSInt32:
1906     case DataFormatInt32:
1907         return GeneratedOperandInteger;
1908
1909     default:
1910         RELEASE_ASSERT_NOT_REACHED();
1911         return GeneratedOperandTypeUnknown;
1912     }
1913 }
1914
1915 void SpeculativeJIT::compileValueToInt32(Node* node)
1916 {
1917     switch (node->child1().useKind()) {
1918 #if USE(JSVALUE64)
1919     case Int52RepUse: {
1920         SpeculateStrictInt52Operand op1(this, node->child1());
1921         GPRTemporary result(this, Reuse, op1);
1922         GPRReg op1GPR = op1.gpr();
1923         GPRReg resultGPR = result.gpr();
1924         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1925         int32Result(resultGPR, node, DataFormatInt32);
1926         return;
1927     }
1928 #endif // USE(JSVALUE64)
1929         
1930     case DoubleRepUse: {
1931         GPRTemporary result(this);
1932         SpeculateDoubleOperand op1(this, node->child1());
1933         FPRReg fpr = op1.fpr();
1934         GPRReg gpr = result.gpr();
1935         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1936         
1937         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
1938         
1939         int32Result(gpr, node);
1940         return;
1941     }
1942     
1943     case NumberUse:
1944     case NotCellUse: {
1945         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1946         case GeneratedOperandInteger: {
1947             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1948             GPRTemporary result(this, Reuse, op1);
1949             m_jit.move(op1.gpr(), result.gpr());
1950             int32Result(result.gpr(), node, op1.format());
1951             return;
1952         }
1953         case GeneratedOperandJSValue: {
1954             GPRTemporary result(this);
1955 #if USE(JSVALUE64)
1956             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1957
1958             GPRReg gpr = op1.gpr();
1959             GPRReg resultGpr = result.gpr();
1960             FPRTemporary tempFpr(this);
1961             FPRReg fpr = tempFpr.fpr();
1962
1963             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1964             JITCompiler::JumpList converted;
1965
1966             if (node->child1().useKind() == NumberUse) {
1967                 DFG_TYPE_CHECK(
1968                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1969                     m_jit.branchTest64(
1970                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1971             } else {
1972                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1973                 
1974                 DFG_TYPE_CHECK(
1975                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1976                 
1977                 // It's not a cell: so true turns into 1 and all else turns into 0.
1978                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1979                 converted.append(m_jit.jump());
1980                 
1981                 isNumber.link(&m_jit);
1982             }
1983
1984             // If we get here, we have a double encoded as a JSValue
1985             m_jit.move(gpr, resultGpr);
1986             unboxDouble(resultGpr, fpr);
1987
1988             silentSpillAllRegisters(resultGpr);
1989             callOperation(toInt32, resultGpr, fpr);
1990             silentFillAllRegisters(resultGpr);
1991
1992             converted.append(m_jit.jump());
1993
1994             isInteger.link(&m_jit);
1995             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1996
1997             converted.link(&m_jit);
1998 #else
1999             Node* childNode = node->child1().node();
2000             VirtualRegister virtualRegister = childNode->virtualRegister();
2001             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2002
2003             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2004
2005             GPRReg payloadGPR = op1.payloadGPR();
2006             GPRReg resultGpr = result.gpr();
2007         
2008             JITCompiler::JumpList converted;
2009
2010             if (info.registerFormat() == DataFormatJSInt32)
2011                 m_jit.move(payloadGPR, resultGpr);
2012             else {
2013                 GPRReg tagGPR = op1.tagGPR();
2014                 FPRTemporary tempFpr(this);
2015                 FPRReg fpr = tempFpr.fpr();
2016                 FPRTemporary scratch(this);
2017
2018                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2019
2020                 if (node->child1().useKind() == NumberUse) {
2021                     DFG_TYPE_CHECK(
2022                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2023                         m_jit.branch32(
2024                             MacroAssembler::AboveOrEqual, tagGPR,
2025                             TrustedImm32(JSValue::LowestTag)));
2026                 } else {
2027                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2028                     
2029                     DFG_TYPE_CHECK(
2030                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2031                         m_jit.branchIfCell(op1.jsValueRegs()));
2032                     
2033                     // It's not a cell: so true turns into 1 and all else turns into 0.
2034                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2035                     m_jit.move(TrustedImm32(0), resultGpr);
2036                     converted.append(m_jit.jump());
2037                     
2038                     isBoolean.link(&m_jit);
2039                     m_jit.move(payloadGPR, resultGpr);
2040                     converted.append(m_jit.jump());
2041                     
2042                     isNumber.link(&m_jit);
2043                 }
2044
2045                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2046
2047                 silentSpillAllRegisters(resultGpr);
2048                 callOperation(toInt32, resultGpr, fpr);
2049                 silentFillAllRegisters(resultGpr);
2050
2051                 converted.append(m_jit.jump());
2052
2053                 isInteger.link(&m_jit);
2054                 m_jit.move(payloadGPR, resultGpr);
2055
2056                 converted.link(&m_jit);
2057             }
2058 #endif
2059             int32Result(resultGpr, node);
2060             return;
2061         }
2062         case GeneratedOperandTypeUnknown:
2063             RELEASE_ASSERT(!m_compileOkay);
2064             return;
2065         }
2066         RELEASE_ASSERT_NOT_REACHED();
2067         return;
2068     }
2069     
2070     default:
2071         ASSERT(!m_compileOkay);
2072         return;
2073     }
2074 }
2075
2076 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2077 {
2078     if (doesOverflow(node->arithMode())) {
2079         // We know that this sometimes produces doubles. So produce a double every
2080         // time. This at least allows subsequent code to not have weird conditionals.
2081             
2082         SpeculateInt32Operand op1(this, node->child1());
2083         FPRTemporary result(this);
2084             
2085         GPRReg inputGPR = op1.gpr();
2086         FPRReg outputFPR = result.fpr();
2087             
2088         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2089             
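             // A negative int32 bit pattern read as uint32 equals the signed value plus 2^32 (for
             // example, -1 becomes 4294967295), so correct the converted double by adding 2^32.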
2090         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2091         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2092         positive.link(&m_jit);
2093             
2094         doubleResult(outputFPR, node);
2095         return;
2096     }
2097     
2098     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2099
2100     SpeculateInt32Operand op1(this, node->child1());
2101     GPRTemporary result(this);
2102
2103     m_jit.move(op1.gpr(), result.gpr());
2104
2105     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2106
2107     int32Result(result.gpr(), node, op1.format());
2108 }
2109
2110 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2111 {
2112     SpeculateDoubleOperand op1(this, node->child1());
2113     FPRTemporary scratch(this);
2114     GPRTemporary result(this);
2115     
2116     FPRReg valueFPR = op1.fpr();
2117     FPRReg scratchFPR = scratch.fpr();
2118     GPRReg resultGPR = result.gpr();
2119
2120     JITCompiler::JumpList failureCases;
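         // branchConvertDoubleToInt32 fills failureCases when the double is not exactly representable
         // as an int32 (or is negative zero when this arith mode must distinguish it); any failure
         // becomes an OSR exit via the Overflow speculation check below.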
2121     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2122     m_jit.branchConvertDoubleToInt32(
2123         valueFPR, resultGPR, failureCases, scratchFPR,
2124         shouldCheckNegativeZero(node->arithMode()));
2125     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2126
2127     int32Result(resultGPR, node);
2128 }
2129
2130 void SpeculativeJIT::compileDoubleRep(Node* node)
2131 {
2132     switch (node->child1().useKind()) {
2133     case RealNumberUse: {
2134         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2135         FPRTemporary result(this);
2136         
2137         JSValueRegs op1Regs = op1.jsValueRegs();
2138         FPRReg resultFPR = result.fpr();
2139         
2140 #if USE(JSVALUE64)
2141         GPRTemporary temp(this);
2142         GPRReg tempGPR = temp.gpr();
2143         m_jit.move(op1Regs.gpr(), tempGPR);
2144         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2145 #else
2146         FPRTemporary temp(this);
2147         FPRReg tempFPR = temp.fpr();
2148         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2149 #endif
2150         
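             // Unboxing a JSValue that is not actually a double yields a NaN bit pattern, and a real
             // number (SpecBytecodeRealNumber) is never NaN. So if the unboxed value compares equal to
             // itself we are done; otherwise fall through, check that the operand is an int32, and
             // convert it.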
2151         JITCompiler::Jump done = m_jit.branchDouble(
2152             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2153         
2154         DFG_TYPE_CHECK(
2155             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2156         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2157         
2158         done.link(&m_jit);
2159         
2160         doubleResult(resultFPR, node);
2161         return;
2162     }
2163     
2164     case NotCellUse:
2165     case NumberUse: {
2166         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2167
2168         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2169         if (isInt32Speculation(possibleTypes)) {
2170             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2171             FPRTemporary result(this);
2172             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2173             doubleResult(result.fpr(), node);
2174             return;
2175         }
2176
2177         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2178         FPRTemporary result(this);
2179
2180 #if USE(JSVALUE64)
2181         GPRTemporary temp(this);
2182
2183         GPRReg op1GPR = op1.gpr();
2184         GPRReg tempGPR = temp.gpr();
2185         FPRReg resultFPR = result.fpr();
2186         JITCompiler::JumpList done;
2187
2188         JITCompiler::Jump isInteger = m_jit.branch64(
2189             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2190
2191         if (node->child1().useKind() == NotCellUse) {
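                 // Inline ToNumber for the non-cell cases handled here: undefined -> NaN, null -> 0,
                 // false -> 0, true -> 1; anything else that reaches this path is already a number.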
2192             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2193             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2194
2195             static const double zero = 0;
2196             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2197
2198             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2199             done.append(isNull);
2200
2201             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2202                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2203
2204             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2205             static const double one = 1;
2206             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2207             done.append(m_jit.jump());
2208             done.append(isFalse);
2209
2210             isUndefined.link(&m_jit);
2211             static const double NaN = PNaN;
2212             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2213             done.append(m_jit.jump());
2214
2215             isNumber.link(&m_jit);
2216         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2217             typeCheck(
2218                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2219                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2220         }
2221     
2222         m_jit.move(op1GPR, tempGPR);
2223         unboxDouble(tempGPR, resultFPR);
2224         done.append(m_jit.jump());
2225     
2226         isInteger.link(&m_jit);
2227         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2228         done.link(&m_jit);
2229 #else // USE(JSVALUE64) -> this is the 32_64 case
2230         FPRTemporary temp(this);
2231     
2232         GPRReg op1TagGPR = op1.tagGPR();
2233         GPRReg op1PayloadGPR = op1.payloadGPR();
2234         FPRReg tempFPR = temp.fpr();
2235         FPRReg resultFPR = result.fpr();
2236         JITCompiler::JumpList done;
2237     
2238         JITCompiler::Jump isInteger = m_jit.branch32(
2239             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2240
2241         if (node->child1().useKind() == NotCellUse) {
2242             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2243             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2244
2245             static const double zero = 0;
2246             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2247
2248             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2249             done.append(isNull);
2250
2251             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2252
2253             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2254             static const double one = 1;
2255             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2256             done.append(m_jit.jump());
2257             done.append(isFalse);
2258
2259             isUndefined.link(&m_jit);
2260             static const double NaN = PNaN;
2261             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2262             done.append(m_jit.jump());
2263
2264             isNumber.link(&m_jit);
2265         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2266             typeCheck(
2267                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2268                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2269         }
2270
2271         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2272         done.append(m_jit.jump());
2273     
2274         isInteger.link(&m_jit);
2275         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2276         done.link(&m_jit);
2277 #endif // USE(JSVALUE64)
2278     
2279         doubleResult(resultFPR, node);
2280         return;
2281     }
2282         
2283 #if USE(JSVALUE64)
2284     case Int52RepUse: {
2285         SpeculateStrictInt52Operand value(this, node->child1());
2286         FPRTemporary result(this);
2287         
2288         GPRReg valueGPR = value.gpr();
2289         FPRReg resultFPR = result.fpr();
2290
2291         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2292         
2293         doubleResult(resultFPR, node);
2294         return;
2295     }
2296 #endif // USE(JSVALUE64)
2297         
2298     default:
2299         RELEASE_ASSERT_NOT_REACHED();
2300         return;
2301     }
2302 }
2303
2304 void SpeculativeJIT::compileValueRep(Node* node)
2305 {
2306     switch (node->child1().useKind()) {
2307     case DoubleRepUse: {
2308         SpeculateDoubleOperand value(this, node->child1());
2309         JSValueRegsTemporary result(this);
2310         
2311         FPRReg valueFPR = value.fpr();
2312         JSValueRegs resultRegs = result.regs();
2313         
2314         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2315         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2316         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2317         // local was purified.
2318         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2319             m_jit.purifyNaN(valueFPR);
2320
2321         boxDouble(valueFPR, resultRegs);
2322         
2323         jsValueResult(resultRegs, node);
2324         return;
2325     }
2326         
2327 #if USE(JSVALUE64)
2328     case Int52RepUse: {
2329         SpeculateStrictInt52Operand value(this, node->child1());
2330         GPRTemporary result(this);
2331         
2332         GPRReg valueGPR = value.gpr();
2333         GPRReg resultGPR = result.gpr();
2334         
2335         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2336         
2337         jsValueResult(resultGPR, node);
2338         return;
2339     }
2340 #endif // USE(JSVALUE64)
2341         
2342     default:
2343         RELEASE_ASSERT_NOT_REACHED();
2344         return;
2345     }
2346 }
2347
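     // Rounds half-up and clamps to the byte range [0, 255]; the constant-folding caller truncates the
     // returned double with toInt32. For example: -3.2 -> 0, 12.5 -> 13.0 (13 after truncation),
     // 300 -> 255, and NaN -> 0 (NaN + 0.5 is still NaN, and !(NaN > 0) is true).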
2348 static double clampDoubleToByte(double d)
2349 {
2350     d += 0.5;
2351     if (!(d > 0))
2352         d = 0;
2353     else if (d > 255)
2354         d = 255;
2355     return d;
2356 }
2357
2358 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2359 {
2360     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2361     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2362     jit.xorPtr(result, result);
2363     MacroAssembler::Jump clamped = jit.jump();
2364     tooBig.link(&jit);
2365     jit.move(JITCompiler::TrustedImm32(255), result);
2366     clamped.link(&jit);
2367     inBounds.link(&jit);
2368 }
2369
2370 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2371 {
2372     // Unordered compare so we pick up NaN
2373     static const double zero = 0;
2374     static const double byteMax = 255;
2375     static const double half = 0.5;
2376     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2377     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2378     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2379     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2380     
2381     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2382     // FIXME: This should probably just use a floating point round!
2383     // https://bugs.webkit.org/show_bug.cgi?id=72054
2384     jit.addDouble(source, scratch);
2385     jit.truncateDoubleToInt32(scratch, result);
2386     MacroAssembler::Jump truncatedInt = jit.jump();
2387     
2388     tooSmall.link(&jit);
2389     jit.xorPtr(result, result);
2390     MacroAssembler::Jump zeroed = jit.jump();
2391     
2392     tooBig.link(&jit);
2393     jit.move(JITCompiler::TrustedImm32(255), result);
2394     
2395     truncatedInt.link(&jit);
2396     zeroed.link(&jit);
2397
2398 }
2399
2400 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2401 {
2402     if (node->op() == PutByValAlias)
2403         return JITCompiler::Jump();
2404     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2405         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2406     if (view) {
2407         uint32_t length = view->length();
2408         Node* indexNode = m_jit.graph().child(node, 1).node();
2409         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2410             return JITCompiler::Jump();
2411         return m_jit.branch32(
2412             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2413     }
2414     return m_jit.branch32(
2415         MacroAssembler::AboveOrEqual, indexGPR,
2416         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2417 }
2418
2419 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2420 {
2421     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2422     if (!jump.isSet())
2423         return;
2424     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2425 }
2426
2427 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2428 {
2429     ASSERT(isInt(type));
2430     
2431     SpeculateCellOperand base(this, node->child1());
2432     SpeculateStrictInt32Operand property(this, node->child2());
2433     StorageOperand storage(this, node->child3());
2434
2435     GPRReg baseReg = base.gpr();
2436     GPRReg propertyReg = property.gpr();
2437     GPRReg storageReg = storage.gpr();
2438
2439     GPRTemporary result(this);
2440     GPRReg resultReg = result.gpr();
2441
2442     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2443
2444     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2445     switch (elementSize(type)) {
2446     case 1:
2447         if (isSigned(type))
2448             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2449         else
2450             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2451         break;
2452     case 2:
2453         if (isSigned(type))
2454             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2455         else
2456             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2457         break;
2458     case 4:
2459         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2460         break;
2461     default:
2462         CRASH();
2463     }
2464     if (elementSize(type) < 4 || isSigned(type)) {
2465         int32Result(resultReg, node);
2466         return;
2467     }
2468     
2469     ASSERT(elementSize(type) == 4 && !isSigned(type));
2470     if (node->shouldSpeculateInt32()) {
2471         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2472         int32Result(resultReg, node);
2473         return;
2474     }
2475     
2476 #if USE(JSVALUE64)
2477     if (node->shouldSpeculateMachineInt()) {
2478         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2479         strictInt52Result(resultReg, node);
2480         return;
2481     }
2482 #endif
2483     
2484     FPRTemporary fresult(this);
2485     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2486     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2487     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2488     positive.link(&m_jit);
2489     doubleResult(fresult.fpr(), node);
2490 }
2491
2492 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2493 {
2494     ASSERT(isInt(type));
2495     
2496     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2497     GPRReg storageReg = storage.gpr();
2498     
2499     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2500     
2501     GPRTemporary value;
2502     GPRReg valueGPR = InvalidGPRReg;
2503     
2504     if (valueUse->isConstant()) {
2505         JSValue jsValue = valueUse->asJSValue();
2506         if (!jsValue.isNumber()) {
2507             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2508             noResult(node);
2509             return;
2510         }
2511         double d = jsValue.asNumber();
2512         if (isClamped(type)) {
2513             ASSERT(elementSize(type) == 1);
2514             d = clampDoubleToByte(d);
2515         }
2516         GPRTemporary scratch(this);
2517         GPRReg scratchReg = scratch.gpr();
2518         m_jit.move(Imm32(toInt32(d)), scratchReg);
2519         value.adopt(scratch);
2520         valueGPR = scratchReg;
2521     } else {
2522         switch (valueUse.useKind()) {
2523         case Int32Use: {
2524             SpeculateInt32Operand valueOp(this, valueUse);
2525             GPRTemporary scratch(this);
2526             GPRReg scratchReg = scratch.gpr();
2527             m_jit.move(valueOp.gpr(), scratchReg);
2528             if (isClamped(type)) {
2529                 ASSERT(elementSize(type) == 1);
2530                 compileClampIntegerToByte(m_jit, scratchReg);
2531             }
2532             value.adopt(scratch);
2533             valueGPR = scratchReg;
2534             break;
2535         }
2536             
2537 #if USE(JSVALUE64)
2538         case Int52RepUse: {
2539             SpeculateStrictInt52Operand valueOp(this, valueUse);
2540             GPRTemporary scratch(this);
2541             GPRReg scratchReg = scratch.gpr();
2542             m_jit.move(valueOp.gpr(), scratchReg);
2543             if (isClamped(type)) {
2544                 ASSERT(elementSize(type) == 1);
2545                 MacroAssembler::Jump inBounds = m_jit.branch64(
2546                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2547                 MacroAssembler::Jump tooBig = m_jit.branch64(
2548                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2549                 m_jit.move(TrustedImm32(0), scratchReg);
2550                 MacroAssembler::Jump clamped = m_jit.jump();
2551                 tooBig.link(&m_jit);
2552                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2553                 clamped.link(&m_jit);
2554                 inBounds.link(&m_jit);
2555             }
2556             value.adopt(scratch);
2557             valueGPR = scratchReg;
2558             break;
2559         }
2560 #endif // USE(JSVALUE64)
2561             
2562         case DoubleRepUse: {
2563             if (isClamped(type)) {
2564                 ASSERT(elementSize(type) == 1);
2565                 SpeculateDoubleOperand valueOp(this, valueUse);
2566                 GPRTemporary result(this);
2567                 FPRTemporary floatScratch(this);
2568                 FPRReg fpr = valueOp.fpr();
2569                 GPRReg gpr = result.gpr();
2570                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2571                 value.adopt(result);
2572                 valueGPR = gpr;
2573             } else {
2574                 SpeculateDoubleOperand valueOp(this, valueUse);
2575                 GPRTemporary result(this);
2576                 FPRReg fpr = valueOp.fpr();
2577                 GPRReg gpr = result.gpr();
2578                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2579                 m_jit.xorPtr(gpr, gpr);
2580                 MacroAssembler::Jump fixed = m_jit.jump();
2581                 notNaN.link(&m_jit);
2582                 
2583                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2584                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2585                 
2586                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2587                 
2588                 fixed.link(&m_jit);
2589                 value.adopt(result);
2590                 valueGPR = gpr;
2591             }
2592             break;
2593         }
2594             
2595         default:
2596             RELEASE_ASSERT_NOT_REACHED();
2597             break;
2598         }
2599     }
2600     
2601     ASSERT_UNUSED(valueGPR, valueGPR != property);
2602     ASSERT(valueGPR != base);
2603     ASSERT(valueGPR != storageReg);
2604     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2605     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2606         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2607         outOfBounds = MacroAssembler::Jump();
2608     }
2609
2610     switch (elementSize(type)) {
2611     case 1:
2612         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2613         break;
2614     case 2:
2615         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2616         break;
2617     case 4:
2618         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2619         break;
2620     default:
2621         CRASH();
2622     }
2623     if (outOfBounds.isSet())
2624         outOfBounds.link(&m_jit);
2625     noResult(node);
2626 }
2627
2628 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2629 {
2630     ASSERT(isFloat(type));
2631     
2632     SpeculateCellOperand base(this, node->child1());
2633     SpeculateStrictInt32Operand property(this, node->child2());
2634     StorageOperand storage(this, node->child3());
2635
2636     GPRReg baseReg = base.gpr();
2637     GPRReg propertyReg = property.gpr();
2638     GPRReg storageReg = storage.gpr();
2639
2640     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2641
2642     FPRTemporary result(this);
2643     FPRReg resultReg = result.fpr();
2644     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2645     switch (elementSize(type)) {
2646     case 4:
2647         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2648         m_jit.convertFloatToDouble(resultReg, resultReg);
2649         break;
2650     case 8: {
2651         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2652         break;
2653     }
2654     default:
2655         RELEASE_ASSERT_NOT_REACHED();
2656     }
2657     
2658     doubleResult(resultReg, node);
2659 }
2660
2661 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2662 {
2663     ASSERT(isFloat(type));
2664     
2665     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2666     GPRReg storageReg = storage.gpr();
2667     
2668     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2669     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2670
2671     SpeculateDoubleOperand valueOp(this, valueUse);
2672     FPRTemporary scratch(this);
2673     FPRReg valueFPR = valueOp.fpr();
2674     FPRReg scratchFPR = scratch.fpr();
2675
2676     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2677     
2678     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2679     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2680         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2681         outOfBounds = MacroAssembler::Jump();
2682     }
2683     
2684     switch (elementSize(type)) {
2685     case 4: {
2686         m_jit.moveDouble(valueFPR, scratchFPR);
2687         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2688         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2689         break;
2690     }
2691     case 8:
2692         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2693         break;
2694     default:
2695         RELEASE_ASSERT_NOT_REACHED();
2696     }
2697     if (outOfBounds.isSet())
2698         outOfBounds.link(&m_jit);
2699     noResult(node);
2700 }
2701
2702 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2703 {
2704     // Check that prototype is an object.
2705     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2706     
2707     // Initialize scratchReg with the value being checked.
2708     m_jit.move(valueReg, scratchReg);
2709     
2710     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
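         // This mirrors OrdinaryHasInstance: repeatedly replace the value with its prototype until it
         // matches prototypeReg (result true) or the chain ends in null / a non-cell (result false).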
2711     MacroAssembler::Label loop(&m_jit);
2712     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2713     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2714     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2715 #if USE(JSVALUE64)
2716     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2717 #else
2718     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2719 #endif
2720     
2721     // No match - result is false.
2722 #if USE(JSVALUE64)
2723     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2724 #else
2725     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2726 #endif
2727     MacroAssembler::Jump putResult = m_jit.jump();
2728     
2729     isInstance.link(&m_jit);
2730 #if USE(JSVALUE64)
2731     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2732 #else
2733     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2734 #endif
2735     
2736     putResult.link(&m_jit);
2737 }
2738
2739 void SpeculativeJIT::compileInstanceOf(Node* node)
2740 {
2741     if (node->child1().useKind() == UntypedUse) {
2742         // It might not be a cell. Speculate less aggressively.
2743         // Or: it might only be used once (i.e. by us), so we get zero benefit
2744         // from speculating any more aggressively than we absolutely need to.
2745         
2746         JSValueOperand value(this, node->child1());
2747         SpeculateCellOperand prototype(this, node->child2());
2748         GPRTemporary scratch(this);
2749         GPRTemporary scratch2(this);
2750         
2751         GPRReg prototypeReg = prototype.gpr();
2752         GPRReg scratchReg = scratch.gpr();
2753         GPRReg scratch2Reg = scratch2.gpr();
2754         
2755         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2756         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2757         moveFalseTo(scratchReg);
2758
2759         MacroAssembler::Jump done = m_jit.jump();
2760         
2761         isCell.link(&m_jit);
2762         
2763         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2764         
2765         done.link(&m_jit);
2766
2767         blessedBooleanResult(scratchReg, node);
2768         return;
2769     }
2770     
2771     SpeculateCellOperand value(this, node->child1());
2772     SpeculateCellOperand prototype(this, node->child2());
2773     
2774     GPRTemporary scratch(this);
2775     GPRTemporary scratch2(this);
2776     
2777     GPRReg valueReg = value.gpr();
2778     GPRReg prototypeReg = prototype.gpr();
2779     GPRReg scratchReg = scratch.gpr();
2780     GPRReg scratch2Reg = scratch2.gpr();
2781     
2782     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2783
2784     blessedBooleanResult(scratchReg, node);
2785 }
2786
2787 void SpeculativeJIT::compileValueAdd(Node* node)
2788 {
2789     if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) {
2790         JSValueOperand left(this, node->child1());
2791         JSValueOperand right(this, node->child2());
2792         JSValueRegs leftRegs = left.jsValueRegs();
2793         JSValueRegs rightRegs = right.jsValueRegs();
2794 #if USE(JSVALUE64)
2795         GPRTemporary result(this);
2796         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2797 #else
2798         GPRTemporary resultTag(this);
2799         GPRTemporary resultPayload(this);
2800         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2801 #endif
2802         flushRegisters();
2803         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
2804         m_jit.exceptionCheck();
2805     
2806         jsValueResult(resultRegs, node);
2807         return;
2808     }
2809
2810     bool leftIsConstInt32 = node->child1()->isInt32Constant();
2811     bool rightIsConstInt32 = node->child2()->isInt32Constant();
2812
2813     // The DFG does not always fold the sum of 2 constant int operands together.
2814     if (leftIsConstInt32 && rightIsConstInt32) {
2815 #if USE(JSVALUE64)
2816         GPRTemporary result(this);
2817         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2818 #else
2819         GPRTemporary resultTag(this);
2820         GPRTemporary resultPayload(this);
2821         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2822 #endif
2823         int64_t leftConst = node->child1()->asInt32();
2824         int64_t rightConst = node->child2()->asInt32();
2825         int64_t resultConst = leftConst + rightConst;
2826         m_jit.moveValue(JSValue(resultConst), resultRegs);
2827         jsValueResult(resultRegs, node);
2828         return;
2829     }
2830
2831     Optional<JSValueOperand> left;
2832     Optional<JSValueOperand> right;
2833
2834     JSValueRegs leftRegs;
2835     JSValueRegs rightRegs;
2836
2837     FPRTemporary leftNumber(this);
2838     FPRTemporary rightNumber(this);
2839     FPRReg leftFPR = leftNumber.fpr();
2840     FPRReg rightFPR = rightNumber.fpr();
2841
2842 #if USE(JSVALUE64)
2843     GPRTemporary result(this);
2844     JSValueRegs resultRegs = JSValueRegs(result.gpr());
2845     GPRTemporary scratch(this);
2846     GPRReg scratchGPR = scratch.gpr();
2847     FPRReg scratchFPR = InvalidFPRReg;
2848 #else
2849     GPRTemporary resultTag(this);
2850     GPRTemporary resultPayload(this);
2851     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2852     GPRReg scratchGPR = resultTag.gpr();
2853     FPRTemporary fprScratch(this);
2854     FPRReg scratchFPR = fprScratch.fpr();
2855 #endif
2856
2857     SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
2858     SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
2859
2860     if (leftIsConstInt32)
2861         leftOperand.setConstInt32(node->child1()->asInt32());
2862     if (rightIsConstInt32)
2863         rightOperand.setConstInt32(node->child2()->asInt32());
2864
2865     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
2866
2867     if (!leftOperand.isConst()) {
2868         left = JSValueOperand(this, node->child1());
2869         leftRegs = left->jsValueRegs();
2870     }
2871     if (!rightOperand.isConst()) {
2872         right = JSValueOperand(this, node->child2());
2873         rightRegs = right->jsValueRegs();
2874     }
2875
2876     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
2877         leftFPR, rightFPR, scratchGPR, scratchFPR);
2878     gen.generateFastPath(m_jit);
2879
2880     ASSERT(gen.didEmitFastPath());
2881     gen.endJumpList().append(m_jit.jump());
2882
2883     gen.slowPathJumpList().link(&m_jit);
2884
2885     silentSpillAllRegisters(resultRegs);
2886
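    // A constant operand was never loaded into registers for the fast path, so
    // materialize it into the now-free result registers before calling out.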
2887     if (leftIsConstInt32) {
2888         leftRegs = resultRegs;
2889         int64_t leftConst = node->child1()->asInt32();
2890         m_jit.moveValue(JSValue(leftConst), leftRegs);
2891     } else if (rightIsConstInt32) {
2892         rightRegs = resultRegs;
2893         int64_t rightConst = node->child2()->asInt32();
2894         m_jit.moveValue(JSValue(rightConst), rightRegs);
2895     }
2896
2897     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
2898
2899     silentFillAllRegisters(resultRegs);
2900     m_jit.exceptionCheck();
2901
2902     gen.endJumpList().link(&m_jit);
2903     jsValueResult(resultRegs, node);
2904     return;
2905 }
2906
2907 void SpeculativeJIT::compileArithAdd(Node* node)
2908 {
2909     switch (node->binaryUseKind()) {
2910     case Int32Use: {
2911         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2912         
2913         if (node->child1()->isInt32Constant()) {
2914             int32_t imm1 = node->child1()->asInt32();
2915             SpeculateInt32Operand op2(this, node->child2());
2916             GPRTemporary result(this);
2917
2918             if (!shouldCheckOverflow(node->arithMode())) {
2919                 m_jit.move(op2.gpr(), result.gpr());
2920                 m_jit.add32(Imm32(imm1), result.gpr());
2921             } else
2922                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2923
2924             int32Result(result.gpr(), node);
2925             return;
2926         }
2927         
2928         if (node->child2()->isInt32Constant()) {
2929             SpeculateInt32Operand op1(this, node->child1());
2930             int32_t imm2 = node->child2()->asInt32();
2931             GPRTemporary result(this);
2932                 
2933             if (!shouldCheckOverflow(node->arithMode())) {
2934                 m_jit.move(op1.gpr(), result.gpr());
2935                 m_jit.add32(Imm32(imm2), result.gpr());
2936             } else
2937                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2938
2939             int32Result(result.gpr(), node);
2940             return;
2941         }
2942                 
2943         SpeculateInt32Operand op1(this, node->child1());
2944         SpeculateInt32Operand op2(this, node->child2());
2945         GPRTemporary result(this, Reuse, op1, op2);
2946
2947         GPRReg gpr1 = op1.gpr();
2948         GPRReg gpr2 = op2.gpr();
2949         GPRReg gprResult = result.gpr();
2950
2951         if (!shouldCheckOverflow(node->arithMode())) {
2952             if (gpr1 == gprResult)
2953                 m_jit.add32(gpr2, gprResult);
2954             else {
2955                 m_jit.move(gpr2, gprResult);
2956                 m_jit.add32(gpr1, gprResult);
2957             }
2958         } else {
2959             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2960                 
2961             if (gpr1 == gprResult)
2962                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2963             else if (gpr2 == gprResult)
2964                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2965             else
2966                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2967         }
2968
2969         int32Result(gprResult, node);
2970         return;
2971     }
2972         
2973 #if USE(JSVALUE64)
2974     case Int52RepUse: {
2975         ASSERT(shouldCheckOverflow(node->arithMode()));
2976         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2977
2978         // Will we need an overflow check? If we can prove that neither input can be
2979         // Int52 then the overflow check will not be necessary.
2980         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2981             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2982             SpeculateWhicheverInt52Operand op1(this, node->child1());
2983             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2984             GPRTemporary result(this, Reuse, op1);
2985             m_jit.move(op1.gpr(), result.gpr());
2986             m_jit.add64(op2.gpr(), result.gpr());
2987             int52Result(result.gpr(), node, op1.format());
2988             return;
2989         }
2990         
2991         SpeculateInt52Operand op1(this, node->child1());
2992         SpeculateInt52Operand op2(this, node->child2());
2993         GPRTemporary result(this);
2994         m_jit.move(op1.gpr(), result.gpr());
2995         speculationCheck(
2996             Int52Overflow, JSValueRegs(), 0,
2997             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2998         int52Result(result.gpr(), node);
2999         return;
3000     }
3001 #endif // USE(JSVALUE64)
3002     
3003     case DoubleRepUse: {
3004         SpeculateDoubleOperand op1(this, node->child1());
3005         SpeculateDoubleOperand op2(this, node->child2());
3006         FPRTemporary result(this, op1, op2);
3007
3008         FPRReg reg1 = op1.fpr();
3009         FPRReg reg2 = op2.fpr();
3010         m_jit.addDouble(reg1, reg2, result.fpr());
3011
3012         doubleResult(result.fpr(), node);
3013         return;
3014     }
3015         
3016     default:
3017         RELEASE_ASSERT_NOT_REACHED();
3018         break;
3019     }
3020 }
3021
3022 void SpeculativeJIT::compileMakeRope(Node* node)
3023 {
3024     ASSERT(node->child1().useKind() == KnownStringUse);
3025     ASSERT(node->child2().useKind() == KnownStringUse);
3026     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3027     
3028     SpeculateCellOperand op1(this, node->child1());
3029     SpeculateCellOperand op2(this, node->child2());
3030     SpeculateCellOperand op3(this, node->child3());
3031     GPRTemporary result(this);
3032     GPRTemporary allocator(this);
3033     GPRTemporary scratch(this);
3034     
3035     GPRReg opGPRs[3];
3036     unsigned numOpGPRs;
3037     opGPRs[0] = op1.gpr();
3038     opGPRs[1] = op2.gpr();
3039     if (node->child3()) {
3040         opGPRs[2] = op3.gpr();
3041         numOpGPRs = 3;
3042     } else {
3043         opGPRs[2] = InvalidGPRReg;
3044         numOpGPRs = 2;
3045     }
3046     GPRReg resultGPR = result.gpr();
3047     GPRReg allocatorGPR = allocator.gpr();
3048     GPRReg scratchGPR = scratch.gpr();
3049     
3050     JITCompiler::JumpList slowPath;
3051     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3052     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3053     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3054         
3055     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3056     for (unsigned i = 0; i < numOpGPRs; ++i)
3057         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3058     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3059         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3060     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3061     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3062     if (!ASSERT_DISABLED) {
3063         JITCompiler::Jump ok = m_jit.branch32(
3064             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3065         m_jit.abortWithReason(DFGNegativeStringLength);
3066         ok.link(&m_jit);
3067     }
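    // Fold the remaining fibers' flags into scratchGPR (AND, so Is8Bit survives only
    // if every fiber is 8-bit) and accumulate their lengths into allocatorGPR,
    // speculating that the combined length does not overflow int32.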
3068     for (unsigned i = 1; i < numOpGPRs; ++i) {
3069         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3070         speculationCheck(
3071             Uncountable, JSValueSource(), nullptr,
3072             m_jit.branchAdd32(
3073                 JITCompiler::Overflow,
3074                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3075     }
3076     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3077     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3078     if (!ASSERT_DISABLED) {
3079         JITCompiler::Jump ok = m_jit.branch32(
3080             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3081         m_jit.abortWithReason(DFGNegativeStringLength);
3082         ok.link(&m_jit);
3083     }
3084     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3085     
3086     switch (numOpGPRs) {
3087     case 2:
3088         addSlowPathGenerator(slowPathCall(
3089             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3090         break;
3091     case 3:
3092         addSlowPathGenerator(slowPathCall(
3093             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3094         break;
3095     default:
3096         RELEASE_ASSERT_NOT_REACHED();
3097         break;
3098     }
3099         
3100     cellResult(resultGPR, node);
3101 }
3102
3103 void SpeculativeJIT::compileArithClz32(Node* node)
3104 {
3105     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3106     SpeculateInt32Operand value(this, node->child1());
3107     GPRTemporary result(this, Reuse, value);
3108     GPRReg valueReg = value.gpr();
3109     GPRReg resultReg = result.gpr();
3110     m_jit.countLeadingZeros32(valueReg, resultReg);
3111     int32Result(resultReg, node);
3112 }
3113
3114 void SpeculativeJIT::compileArithSub(Node* node)
3115 {
3116     switch (node->binaryUseKind()) {
3117     case Int32Use: {
3118         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3119         
3120         if (node->child2()->isInt32Constant()) {
3121             SpeculateInt32Operand op1(this, node->child1());
3122             int32_t imm2 = node->child2()->asInt32();
3123             GPRTemporary result(this);
3124
3125             if (!shouldCheckOverflow(node->arithMode())) {
3126                 m_jit.move(op1.gpr(), result.gpr());
3127                 m_jit.sub32(Imm32(imm2), result.gpr());
3128             } else {
3129                 GPRTemporary scratch(this);
3130                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3131             }
3132
3133             int32Result(result.gpr(), node);
3134             return;
3135         }
3136             
3137         if (node->child1()->isInt32Constant()) {
3138             int32_t imm1 = node->child1()->asInt32();
3139             SpeculateInt32Operand op2(this, node->child2());
3140             GPRTemporary result(this);
3141                 
3142             m_jit.move(Imm32(imm1), result.gpr());
3143             if (!shouldCheckOverflow(node->arithMode()))
3144                 m_jit.sub32(op2.gpr(), result.gpr());
3145             else
3146                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3147                 
3148             int32Result(result.gpr(), node);
3149             return;
3150         }
3151             
3152         SpeculateInt32Operand op1(this, node->child1());
3153         SpeculateInt32Operand op2(this, node->child2());
3154         GPRTemporary result(this);
3155
3156         if (!shouldCheckOverflow(node->arithMode())) {
3157             m_jit.move(op1.gpr(), result.gpr());
3158             m_jit.sub32(op2.gpr(), result.gpr());
3159         } else
3160             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3161
3162         int32Result(result.gpr(), node);
3163         return;
3164     }
3165         
3166 #if USE(JSVALUE64)
3167     case Int52RepUse: {
3168         ASSERT(shouldCheckOverflow(node->arithMode()));
3169         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3170
3171         // Will we need an overflow check? If we can prove that neither input can be
3172         // Int52 then the overflow check will not be necessary.
3173         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3174             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3175             SpeculateWhicheverInt52Operand op1(this, node->child1());
3176             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3177             GPRTemporary result(this, Reuse, op1);
3178             m_jit.move(op1.gpr(), result.gpr());
3179             m_jit.sub64(op2.gpr(), result.gpr());
3180             int52Result(result.gpr(), node, op1.format());
3181             return;
3182         }
3183         
3184         SpeculateInt52Operand op1(this, node->child1());
3185         SpeculateInt52Operand op2(this, node->child2());
3186         GPRTemporary result(this);
3187         m_jit.move(op1.gpr(), result.gpr());
3188         speculationCheck(
3189             Int52Overflow, JSValueRegs(), 0,
3190             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3191         int52Result(result.gpr(), node);
3192         return;
3193     }
3194 #endif // USE(JSVALUE64)
3195
3196     case DoubleRepUse: {
3197         SpeculateDoubleOperand op1(this, node->child1());
3198         SpeculateDoubleOperand op2(this, node->child2());
3199         FPRTemporary result(this, op1);
3200
3201         FPRReg reg1 = op1.fpr();
3202         FPRReg reg2 = op2.fpr();
3203         m_jit.subDouble(reg1, reg2, result.fpr());
3204
3205         doubleResult(result.fpr(), node);
3206         return;
3207     }
3208
3209     case UntypedUse: {
3210         JSValueOperand left(this, node->child1());
3211         JSValueOperand right(this, node->child2());
3212
3213         JSValueRegs leftRegs = left.jsValueRegs();
3214         JSValueRegs rightRegs = right.jsValueRegs();
3215
3216         FPRTemporary leftNumber(this);
3217         FPRTemporary rightNumber(this);
3218         FPRReg leftFPR = leftNumber.fpr();
3219         FPRReg rightFPR = rightNumber.fpr();
3220
3221 #if USE(JSVALUE64)
3222         GPRTemporary result(this);
3223         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3224         GPRTemporary scratch(this);
3225         GPRReg scratchGPR = scratch.gpr();
3226         FPRReg scratchFPR = InvalidFPRReg;
3227 #else
3228         GPRTemporary resultTag(this);
3229         GPRTemporary resultPayload(this);
3230         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3231         GPRReg scratchGPR = resultTag.gpr();
3232         FPRTemporary fprScratch(this);
3233         FPRReg scratchFPR = fprScratch.fpr();
3234 #endif
3235
3236         SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
3237         SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
3238
3239         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3240             leftFPR, rightFPR, scratchGPR, scratchFPR);
3241         gen.generateFastPath(m_jit);
3242
3243         ASSERT(gen.didEmitFastPath());
3244         gen.endJumpList().append(m_jit.jump());
3245
3246         gen.slowPathJumpList().link(&m_jit);
3247         silentSpillAllRegisters(resultRegs);
3248         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3249         silentFillAllRegisters(resultRegs);
3250         m_jit.exceptionCheck();
3251
3252         gen.endJumpList().link(&m_jit);
3253         jsValueResult(resultRegs, node);
3254         return;
3255     }
3256
3257     default:
3258         RELEASE_ASSERT_NOT_REACHED();
3259         return;
3260     }
3261 }
3262
3263 void SpeculativeJIT::compileArithNegate(Node* node)
3264 {
3265     switch (node->child1().useKind()) {
3266     case Int32Use: {
3267         SpeculateInt32Operand op1(this, node->child1());
3268         GPRTemporary result(this);
3269
3270         m_jit.move(op1.gpr(), result.gpr());
3271
3272         // Note: there is no notion of being not used as a number, but someone
3273         // caring about negative zero.
3274         
3275         if (!shouldCheckOverflow(node->arithMode()))
3276             m_jit.neg32(result.gpr());
3277         else if (!shouldCheckNegativeZero(node->arithMode()))
3278             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3279         else {
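            // (result & 0x7fffffff) == 0 means the operand is either 0, whose negation
            // is -0 and cannot be represented as an int32, or INT_MIN, whose negation
            // overflows; exit in both cases before negating.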
3280             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3281             m_jit.neg32(result.gpr());
3282         }
3283
3284         int32Result(result.gpr(), node);
3285         return;
3286     }
3287
3288 #if USE(JSVALUE64)
3289     case Int52RepUse: {
3290         ASSERT(shouldCheckOverflow(node->arithMode()));
3291         
3292         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3293             SpeculateWhicheverInt52Operand op1(this, node->child1());
3294             GPRTemporary result(this);
3295             GPRReg op1GPR = op1.gpr();
3296             GPRReg resultGPR = result.gpr();
3297             m_jit.move(op1GPR, resultGPR);
3298             m_jit.neg64(resultGPR);
3299             if (shouldCheckNegativeZero(node->arithMode())) {
3300                 speculationCheck(
3301                     NegativeZero, JSValueRegs(), 0,
3302                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3303             }
3304             int52Result(resultGPR, node, op1.format());
3305             return;
3306         }
3307         
3308         SpeculateInt52Operand op1(this, node->child1());
3309         GPRTemporary result(this);
3310         GPRReg op1GPR = op1.gpr();
3311         GPRReg resultGPR = result.gpr();
3312         m_jit.move(op1GPR, resultGPR);
3313         speculationCheck(
3314             Int52Overflow, JSValueRegs(), 0,
3315             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3316         if (shouldCheckNegativeZero(node->arithMode())) {
3317             speculationCheck(
3318                 NegativeZero, JSValueRegs(), 0,
3319                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3320         }
3321         int52Result(resultGPR, node);
3322         return;
3323     }
3324 #endif // USE(JSVALUE64)
3325         
3326     case DoubleRepUse: {
3327         SpeculateDoubleOperand op1(this, node->child1());
3328         FPRTemporary result(this);
3329         
3330         m_jit.negateDouble(op1.fpr(), result.fpr());
3331         
3332         doubleResult(result.fpr(), node);
3333         return;
3334     }
3335         
3336     default:
3337         RELEASE_ASSERT_NOT_REACHED();
3338         return;
3339     }
3340 }

3341 void SpeculativeJIT::compileArithMul(Node* node)
3342 {
3343     switch (node->binaryUseKind()) {
3344     case Int32Use: {
3345         SpeculateInt32Operand op1(this, node->child1());
3346         SpeculateInt32Operand op2(this, node->child2());
3347         GPRTemporary result(this);
3348
3349         GPRReg reg1 = op1.gpr();
3350         GPRReg reg2 = op2.gpr();
3351
3352         // We can perform truncated multiplications if we get to this point, because if the
3353         // fixup phase could not prove that it would be safe, it would have turned us into
3354         // a double multiplication.
3355         if (!shouldCheckOverflow(node->arithMode())) {
3356             m_jit.move(reg1, result.gpr());
3357             m_jit.mul32(reg2, result.gpr());
3358         } else {
3359             speculationCheck(
3360                 Overflow, JSValueRegs(), 0,
3361                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3362         }
3363             
3364         // Check for negative zero, if the users of this node care about such things.
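        // (e.g. (-5) * 0 must produce -0, which an int32 cannot represent, so a zero
        // product with a negative operand forces an exit.)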
3365         if (shouldCheckNegativeZero(node->arithMode())) {
3366             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3367             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3368             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3369             resultNonZero.link(&m_jit);
3370         }
3371
3372         int32Result(result.gpr(), node);
3373         return;
3374     }
3375     
3376 #if USE(JSVALUE64)   
3377     case Int52RepUse: {
3378         ASSERT(shouldCheckOverflow(node->arithMode()));
3379         
3380         // This is super clever. We want to do an int52 multiplication and check the
3381         // int52 overflow bit. There is no direct hardware support for this, but we do
3382         // have the ability to do an int64 multiplication and check the int64 overflow
3383         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3384         // registers, with the high 12 bits being sign-extended. We can do:
3385         //
3386         //     (a * (b << 12))
3387         //
3388         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3389         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3390         // multiplication overflows is identical to whether the 'a * b' 52-bit
3391         // multiplication overflows.
3392         //
3393         // In our nomenclature, this is:
3394         //
3395         //     strictInt52(a) * int52(b) => int52
3396         //
3397         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3398         // bits.
3399         //
3400         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3401         // we just do whatever is more convenient for op1 and have op2 do the
3402         // opposite. This ensures that we do at most one shift.
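        // As an illustration: with a = b = 2^26, a * b = 2^52 exceeds the int52
        // maximum of 2^51 - 1, and a * (b << 12) = 2^64 likewise overflows a signed
        // 64-bit register, tripping the overflow check below; conversely, any product
        // that fits in 52 bits keeps the shifted product within 64 bits.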
3403
3404         SpeculateWhicheverInt52Operand op1(this, node->child1());
3405         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3406         GPRTemporary result(this);
3407         
3408         GPRReg op1GPR = op1.gpr();
3409         GPRReg op2GPR = op2.gpr();
3410         GPRReg resultGPR = result.gpr();
3411         
3412         m_jit.move(op1GPR, resultGPR);
3413         speculationCheck(
3414             Int52Overflow, JSValueRegs(), 0,
3415             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3416         
3417         if (shouldCheckNegativeZero(node->arithMode())) {
3418             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3419                 MacroAssembler::NonZero, resultGPR);
3420             speculationCheck(
3421                 NegativeZero, JSValueRegs(), 0,
3422                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3423             speculationCheck(
3424                 NegativeZero, JSValueRegs(), 0,
3425                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3426             resultNonZero.link(&m_jit);
3427         }
3428         
3429         int52Result(resultGPR, node);
3430         return;
3431     }
3432 #endif // USE(JSVALUE64)
3433         
3434     case DoubleRepUse: {
3435         SpeculateDoubleOperand op1(this, node->child1());
3436         SpeculateDoubleOperand op2(this, node->child2());
3437         FPRTemporary result(this, op1, op2);
3438         
3439         FPRReg reg1 = op1.fpr();
3440         FPRReg reg2 = op2.fpr();
3441         
3442         m_jit.mulDouble(reg1, reg2, result.fpr());
3443         
3444         doubleResult(result.fpr(), node);
3445         return;
3446     }
3447
3448     case UntypedUse: {
3449         Edge& leftChild = node->child1();
3450         Edge& rightChild = node->child2();
3451
3452         if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3453             JSValueOperand left(this, leftChild);
3454             JSValueOperand right(this, rightChild);
3455             JSValueRegs leftRegs = left.jsValueRegs();
3456             JSValueRegs rightRegs = right.jsValueRegs();
3457 #if USE(JSVALUE64)
3458             GPRTemporary result(this);
3459             JSValueRegs resultRegs = JSValueRegs(result.gpr());
3460 #else
3461             GPRTemporary resultTag(this);
3462             GPRTemporary resultPayload(this);
3463             JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3464 #endif
3465             flushRegisters();
3466             callOperation(operationValueMul, resultRegs, leftRegs, rightRegs);
3467             m_jit.exceptionCheck();
3468
3469             jsValueResult(resultRegs, node);
3470             return;
3471         }
3472
3473         Optional<JSValueOperand> left;
3474         Optional<JSValueOperand> right;
3475
3476         JSValueRegs leftRegs;
3477         JSValueRegs rightRegs;
3478
3479         FPRTemporary leftNumber(this);
3480         FPRTemporary rightNumber(this);
3481         FPRReg leftFPR = leftNumber.fpr();
3482         FPRReg rightFPR = rightNumber.fpr();
3483
3484 #if USE(JSVALUE64)
3485         GPRTemporary result(this);
3486         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3487         GPRTemporary scratch(this);
3488         GPRReg scratchGPR = scratch.gpr();
3489         FPRReg scratchFPR = InvalidFPRReg;
3490 #else
3491         GPRTemporary resultTag(this);
3492         GPRTemporary resultPayload(this);
3493         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3494         GPRReg scratchGPR = resultTag.gpr();
3495         FPRTemporary fprScratch(this);
3496         FPRReg scratchFPR = fprScratch.fpr();
3497 #endif
3498
3499         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3500         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3501
3502         if (leftChild->isInt32Constant())
3503             leftOperand.setConstInt32(leftChild->asInt32());
3504         if (rightChild->isInt32Constant())
3505             rightOperand.setConstInt32(rightChild->asInt32());
3506
3507         RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3508
3509         if (!leftOperand.isPositiveConstInt32()) {
3510             left = JSValueOperand(this, leftChild);
3511             leftRegs = left->jsValueRegs();
3512         }
3513         if (!rightOperand.isPositiveConstInt32()) {
3514             right = JSValueOperand(this, rightChild);
3515             rightRegs = right->jsValueRegs();
3516         }
3517
3518         JITMulGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3519             leftFPR, rightFPR, scratchGPR, scratchFPR);
3520         gen.generateFastPath(m_jit);
3521
3522         ASSERT(gen.didEmitFastPath());
3523         gen.endJumpList().append(m_jit.jump());
3524
3525         gen.slowPathJumpList().link(&m_jit);
3526         silentSpillAllRegisters(resultRegs);
3527
3528         if (leftOperand.isPositiveConstInt32()) {
3529             leftRegs = resultRegs;
3530             int64_t leftConst = leftOperand.asConstInt32();
3531             m_jit.moveValue(JSValue(leftConst), leftRegs);
3532         }
3533         if (rightOperand.isPositiveConstInt32()) {
3534             rightRegs = resultRegs;
3535             int64_t rightConst = rightOperand.asConstInt32();
3536             m_jit.moveValue(JSValue(rightConst), rightRegs);
3537         }
3538
3539         callOperation(operationValueMul, resultRegs, leftRegs, rightRegs);
3540
3541         silentFillAllRegisters(resultRegs);
3542         m_jit.exceptionCheck();
3543
3544         gen.endJumpList().link(&m_jit);
3545         jsValueResult(resultRegs, node);
3546         return;
3547     }
3548
3549     default:
3550         RELEASE_ASSERT_NOT_REACHED();
3551         return;
3552     }
3553 }
3554
3555 void SpeculativeJIT::compileArithDiv(Node* node)
3556 {
3557     switch (node->binaryUseKind()) {
3558     case Int32Use: {
3559 #if CPU(X86) || CPU(X86_64)
3560         SpeculateInt32Operand op1(this, node->child1());
3561         SpeculateInt32Operand op2(this, node->child2());
3562         GPRTemporary eax(this, X86Registers::eax);
3563         GPRTemporary edx(this, X86Registers::edx);
3564         GPRReg op1GPR = op1.gpr();
3565         GPRReg op2GPR = op2.gpr();
3566     
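        // x86 idiv takes its dividend in edx:eax and clobbers both, so if the divisor
        // currently lives in eax or edx it must first be copied to another register.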
3567         GPRReg op2TempGPR;
3568         GPRReg temp;
3569         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3570             op2TempGPR = allocate();
3571             temp = op2TempGPR;
3572         } else {
3573             op2TempGPR = InvalidGPRReg;
3574             if (op1GPR == X86Registers::eax)
3575                 temp = X86Registers::edx;
3576             else
3577                 temp = X86Registers::eax;
3578         }
3579     
3580         ASSERT(temp != op1GPR);
3581         ASSERT(temp != op2GPR);
3582     
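        // temp = op2 + 1, so the unsigned "Above 1" check below passes exactly when
        // op2 is neither 0 nor -1, the two denominators that need special handling.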
3583         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3584     
3585         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3586     
3587         JITCompiler::JumpList done;
3588         if (shouldCheckOverflow(node->arithMode())) {
3589             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3590             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3591         } else {
3592             // This is the case where we convert the result to an int after we're done, and we
3593             // already know that the denominator is either -1 or 0. So, if the denominator is
3594             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3595             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3596             // are happy to fall through to a normal division, since we're just dividing
3597             // something by negative 1.
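            // (With truncating semantics, INT_MIN / -1 mathematically yields 2^31,
            // which wraps back to INT_MIN when converted to an int32, hence returning
            // the numerator below.)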
3598         
3599             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3600             m_jit.move(TrustedImm32(0), eax.gpr());
3601             done.append(m_jit.jump());
3602         
3603             notZero.link(&m_jit);
3604             JITCompiler::Jump notNeg2ToThe31 =
3605                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3606             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3607             done.append(m_jit.jump());
3608         
3609             notNeg2ToThe31.link(&m_jit);
3610         }
3611     
3612         safeDenominator.link(&m_jit);
3613     
3614         // If the user cares about negative zero, then speculate that we're not about
3615         // to produce negative zero.
3616         if (shouldCheckNegativeZero(node->arithMode())) {
3617             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3618             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3619             numeratorNonZero.link(&m_jit);
3620         }
3621     
3622         if (op2TempGPR != InvalidGPRReg) {
3623             m_jit.move(op2GPR, op2TempGPR);
3624             op2GPR = op2TempGPR;
3625         }
3626             
3627         m_jit.move(op1GPR, eax.gpr());
3628         m_jit.x86ConvertToDoubleWord32();
3629         m_jit.x86Div32(op2GPR);
3630             
3631         if (op2TempGPR != InvalidGPRReg)
3632             unlock(op2TempGPR);
3633
3634         // Check that there was no remainder. If there had been, then we'd be obligated to
3635         // produce a double result instead.
3636         if (shouldCheckOverflow(node->arithMode()))
3637             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3638         
3639         done.link(&m_jit);
3640         int32Result(eax.gpr(), node);
3641 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3642         SpeculateInt32Operand op1(this, node->child1());
3643         SpeculateInt32Operand op2(this, node->child2());
3644         GPRReg op1GPR = op1.gpr();
3645         GPRReg op2GPR = op2.gpr();
3646         GPRTemporary quotient(this);
3647         GPRTemporary multiplyAnswer(this);
3648
3649         // If the user cares about negative zero, then speculate that we're not about
3650         // to produce negative zero.
3651         if (shouldCheckNegativeZero(node->arithMode())) {
3652             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3653             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3654             numeratorNonZero.link(&m_jit);
3655         }
3656
3657         if (shouldCheckOverflow(node->arithMode()))
3658             speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR));
3659
3660         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3661
3662         // Check that there was no remainder. If there had been, then we'd be obligated to
3663         // produce a double result instead.
3664         if (shouldCheckOverflow(node->arithMode())) {
3665             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3666             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3667         }
3668
3669         int32Result(quotient.gpr(), node);
3670 #else
3671         RELEASE_ASSERT_NOT_REACHED();
3672 #endif
3673         break;
3674     }
3675         
3676     case DoubleRepUse: {
3677         SpeculateDoubleOperand op1(this, node->child1());
3678         SpeculateDoubleOperand op2(this, node->child2());
3679         FPRTemporary result(this, op1);
3680         
3681         FPRReg reg1 = op1.fpr();
3682         FPRReg reg2 = op2.fpr();
3683         m_jit.divDouble(reg1, reg2, result.fpr());
3684         
3685         doubleResult(result.fpr(), node);
3686         break;
3687     }
3688         
3689     default:
3690         RELEASE_ASSERT_NOT_REACHED();
3691         break;
3692     }
3693 }
3694
3695 void SpeculativeJIT::compileArithMod(Node* node)
3696 {
3697     switch (node->binaryUseKind()) {
3698     case Int32Use: {
3699         // In the fast path, the dividend value could be the final result
3700         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3701         SpeculateStrictInt32Operand op1(this, node->child1());
3702         
3703         if (node->child2()->isInt32Constant()) {
3704             int32_t divisor = node->child2()->asInt32();
3705             if (divisor > 1 && hasOneBitSet(divisor)) {
3706                 unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor));
3707                 GPRReg dividendGPR = op1.gpr();
3708                 GPRTemporary result(this);
3709                 GPRReg resultGPR = result.gpr();
3710
3711                 // This is what LLVM generates. It's pretty crazy. Here's my
3712                 // attempt at understanding it.
3713                 
3714                 // First, compute either divisor - 1, or 0, depending on whether
3715                 // the dividend is negative:
3716                 //
3717                 // If dividend < 0:  resultGPR = divisor - 1
3718                 // If dividend >= 0: resultGPR = 0
3719                 m_jit.move(dividendGPR, resultGPR);
3720                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3721                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3722                 
3723                 // Add in the dividend, so that:
3724                 //
3725                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3726                 // If dividend >= 0: resultGPR = dividend
3727                 m_jit.add32(dividendGPR, resultGPR);
3728                 
3729                 // Mask so as to only get the *high* bits. This rounds down
3730                 // (towards negative infinity) resultGPR to the nearest multiple
3731                 // of divisor, so that:
3732                 //
3733                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3734                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3735                 //
3736                 // Note that this can be simplified to:
3737                 //
3738                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3739                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3740                 //
3741                 // Note that if the dividend is negative, resultGPR will also be negative.
3742                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3743                 // zero, because of how things are conditionalized.
3744                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3745                 
3746                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3747                 //
3748                 // resultGPR = dividendGPR - resultGPR
3749                 m_jit.neg32(resultGPR);
3750                 m_jit.add32(dividendGPR, resultGPR);
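                // For instance, with divisor = 4 (logarithm = 2) and dividend = -7:
                // the arithmetic shift by 31 gives -1, the logical shift by 30 gives 3
                // (divisor - 1), adding the dividend gives -4, and32(-4) leaves -4,
                // neg32 gives 4, and adding the dividend gives -3, matching -7 % 4.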
3751                 
3752                 if (shouldCheckNegativeZero(node->arithMode())) {
3753                     // Check that we're not about to create negative zero.
3754                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3755                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3756                     numeratorPositive.link(&m_jit);
3757                 }
3758
3759                 int32Result(resultGPR, node);
3760                 return;
3761             }
3762         }
3763         
3764 #if CPU(X86) || CPU(X86_64)
3765         if (node->child2()->isInt32Constant()) {
3766             int32_t divisor = node->child2()->asInt32();
3767             if (divisor && divisor != -1) {
3768                 GPRReg op1Gpr = op1.gpr();
3769
3770                 GPRTemporary eax(this, X86Registers::eax);
3771                 GPRTemporary edx(this, X86Registers::edx);
3772                 GPRTemporary scratch(this);
3773                 GPRReg scratchGPR = scratch.gpr();
3774
3775                 GPRReg op1SaveGPR;
3776                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3777                     op1SaveGPR = allocate();
3778                     ASSERT(op1Gpr != op1SaveGPR);
3779                     m_jit.move(op1Gpr, op1SaveGPR);
3780                 } else
3781                     op1SaveGPR = op1Gpr;
3782                 ASSERT(op1SaveGPR != X86Registers::eax);
3783                 ASSERT(op1SaveGPR != X86Registers::edx);
3784
3785                 m_jit.move(op1Gpr, eax.gpr());
3786                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3787                 m_jit.x86ConvertToDoubleWord32();
3788                 m_jit.x86Div32(scratchGPR);
3789                 if (shouldCheckNegativeZero(node->arithMode())) {
3790                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3791                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3792                     numeratorPositive.link(&m_jit);
3793                 }
3794             
3795                 if (op1SaveGPR != op1Gpr)
3796                     unlock(op1SaveGPR);
3797
3798                 int32Result(edx.gpr(), node);
3799                 return;
3800             }
3801         }
3802 #endif
3803
3804         SpeculateInt32Operand op2(this, node->child2());
3805 #if CPU(X86) || CPU(X86_64)
3806         GPRTemporary eax(this, X86Registers::eax);
3807         GPRTemporary edx(this, X86Registers::edx);
3808         GPRReg op1GPR = op