Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSArrowFunction.h"
42 #include "JSCInlines.h"
43 #include "JSEnvironmentRecord.h"
44 #include "JSLexicalEnvironment.h"
45 #include "LinkBuffer.h"
46 #include "ScopedArguments.h"
47 #include "ScratchRegisterAllocator.h"
48 #include "WriteBarrierBuffer.h"
49 #include <wtf/MathExtras.h>
50
51 namespace JSC { namespace DFG {
52
53 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
54     : m_compileOkay(true)
55     , m_jit(jit)
56     , m_currentNode(0)
57     , m_lastGeneratedNode(LastNodeType)
58     , m_indexInBlock(0)
59     , m_generationInfo(m_jit.graph().frameRegisterCount())
60     , m_state(m_jit.graph())
61     , m_interpreter(m_jit.graph(), m_state)
62     , m_stream(&jit.jitCode()->variableEventStream)
63     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
64     , m_isCheckingArgumentTypes(false)
65 {
66 }
67
68 SpeculativeJIT::~SpeculativeJIT()
69 {
70 }
71
72 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
73 {
74     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
75     
76     GPRTemporary scratch(this);
77     GPRTemporary scratch2(this);
78     GPRReg scratchGPR = scratch.gpr();
79     GPRReg scratch2GPR = scratch2.gpr();
80     
81     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
82     
83     JITCompiler::JumpList slowCases;
84     
85     slowCases.append(
86         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
87     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
88     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
89     
90     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
91     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
92     
93     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
94 #if USE(JSVALUE64)
95         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
96         for (unsigned i = numElements; i < vectorLength; ++i)
97             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
98 #else
99         EncodedValueDescriptor value;
100         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
101         for (unsigned i = numElements; i < vectorLength; ++i) {
102             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
103             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
104         }
105 #endif
106     }
107     
108     // I want a slow path that also loads out the storage pointer, and that's
109     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
110     // of work for a very small piece of functionality. :-/
111     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
112         slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
113         structure, numElements));
114 }
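// Layout sketch for the fast path above (sizes assume a 64-bit build where
// sizeof(JSValue) == 8 and sizeof(IndexingHeader) == 8, and BASE_VECTOR_LEN is
// assumed to be 4 for the example):
//
//     numElements = 3  =>  vectorLength = max(4, 3) = 4
//     emitAllocateBasicStorage asks for 4 * 8 + 8 = 40 bytes and leaves
//     storageGPR at the end of that allocation; subtracting 4 * 8 moves it back
//     to element 0, so the IndexingHeader (publicLength / vectorLength, written
//     through the negative Butterfly offsets) sits in the 8 bytes just below.
//     For double arrays, slots numElements..vectorLength-1 are seeded with PNaN
//     so they read back as holes.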
115
116 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
117 {
118     if (inlineCallFrame && !inlineCallFrame->isVarargs())
119         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
120     else {
121         VirtualRegister argumentCountRegister;
122         if (!inlineCallFrame)
123             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
124         else
125             argumentCountRegister = inlineCallFrame->argumentCountRegister;
126         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
127         if (!includeThis)
128             m_jit.sub32(TrustedImm32(1), lengthGPR);
129     }
130 }
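// Example of the two cases above: for a non-varargs inline call frame with
// arguments.size() == 3 and includeThis == false, the length is the constant
// 3 - 1 = 2 and no load is emitted; for a varargs or machine frame, the count
// is loaded from the ArgumentCount slot and 1 is subtracted at run time.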
131
132 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
133 {
134     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
135 }
136
137 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
138 {
139     if (origin.inlineCallFrame) {
140         if (origin.inlineCallFrame->isClosureCall) {
141             m_jit.loadPtr(
142                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
143                 calleeGPR);
144         } else {
145             m_jit.move(
146                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
147                 calleeGPR);
148         }
149     } else
150         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
151 }
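// A hypothetical caller materializing both values for the current code origin
// might look like this (the register names are illustrative only):
//
//     emitGetLength(node->origin.semantic, lengthGPR, /* includeThis */ false);
//     emitGetCallee(node->origin.semantic, calleeGPR);
//
// Closure calls reload the callee from the stack slot named by calleeRecovery;
// non-closure inline frames use the constant callee cell, and the machine
// frame case reads the Callee slot of the current call frame.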
152
153 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
154 {
155     m_jit.addPtr(
156         TrustedImm32(
157             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
158         GPRInfo::callFrameRegister, startGPR);
159 }
160
161 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
162 {
163     if (!doOSRExitFuzzing())
164         return MacroAssembler::Jump();
165     
166     MacroAssembler::Jump result;
167     
168     m_jit.pushToSave(GPRInfo::regT0);
169     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
170     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
171     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
172     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
173     unsigned at = Options::fireOSRExitFuzzAt();
174     if (at || atOrAfter) {
175         unsigned threshold;
176         MacroAssembler::RelationalCondition condition;
177         if (atOrAfter) {
178             threshold = atOrAfter;
179             condition = MacroAssembler::Below;
180         } else {
181             threshold = at;
182             condition = MacroAssembler::NotEqual;
183         }
184         MacroAssembler::Jump ok = m_jit.branch32(
185             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
186         m_jit.popToRestore(GPRInfo::regT0);
187         result = m_jit.jump();
188         ok.link(&m_jit);
189     }
190     m_jit.popToRestore(GPRInfo::regT0);
191     
192     return result;
193 }
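// Sketch of the fuzzing policy implemented above, in terms of the global check
// counter: with fireOSRExitFuzzAt=N the jump fires only on the Nth check (the
// "ok" branch is NotEqual N), and with fireOSRExitFuzzAtOrAfter=N it fires on
// every check from the Nth onward (the "ok" branch is Below N). When neither
// option is set, the counter is still maintained but the returned Jump stays
// unset, so speculationCheck() simply ignores it.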
194
195 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
196 {
197     if (!m_compileOkay)
198         return;
199     ASSERT(m_isCheckingArgumentTypes || m_canExit);
200     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
201     if (fuzzJump.isSet()) {
202         JITCompiler::JumpList jumpsToFail;
203         jumpsToFail.append(fuzzJump);
204         jumpsToFail.append(jumpToFail);
205         m_jit.appendExitInfo(jumpsToFail);
206     } else
207         m_jit.appendExitInfo(jumpToFail);
208     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
209 }
210
211 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
212 {
213     if (!m_compileOkay)
214         return;
215     ASSERT(m_isCheckingArgumentTypes || m_canExit);
216     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
217     if (fuzzJump.isSet()) {
218         JITCompiler::JumpList myJumpsToFail;
219         myJumpsToFail.append(jumpsToFail);
220         myJumpsToFail.append(fuzzJump);
221         m_jit.appendExitInfo(myJumpsToFail);
222     } else
223         m_jit.appendExitInfo(jumpsToFail);
224     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
225 }
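// Both overloads above record an exit the same way: the failing jump(s) are
// stashed via appendExitInfo(), and an OSRExit entry captures the exit kind,
// the JSValueSource for profiling, a MethodOfGettingAValueProfile for the
// node, and m_stream->size() -- the index into the variable event stream that
// lets the exit compiler reconstruct the bytecode state at this point.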
226
227 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
228 {
229     if (!m_compileOkay)
230         return OSRExitJumpPlaceholder();
231     ASSERT(m_isCheckingArgumentTypes || m_canExit);
232     unsigned index = m_jit.jitCode()->osrExit.size();
233     m_jit.appendExitInfo();
234     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
235     return OSRExitJumpPlaceholder(index);
236 }
237
238 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
239 {
240     ASSERT(m_isCheckingArgumentTypes || m_canExit);
241     return speculationCheck(kind, jsValueSource, nodeUse.node());
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
245 {
246     ASSERT(m_isCheckingArgumentTypes || m_canExit);
247     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
248 }
249
250 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
251 {
252     ASSERT(m_isCheckingArgumentTypes || m_canExit);
253     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
254 }
255
256 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
257 {
258     if (!m_compileOkay)
259         return;
260     ASSERT(m_isCheckingArgumentTypes || m_canExit);
261     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
262     m_jit.appendExitInfo(jumpToFail);
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
264 }
265
266 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
267 {
268     ASSERT(m_isCheckingArgumentTypes || m_canExit);
269     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
270 }
271
272 void SpeculativeJIT::emitInvalidationPoint(Node* node)
273 {
274     if (!m_compileOkay)
275         return;
276     ASSERT(m_canExit);
277     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
278     m_jit.jitCode()->appendOSRExit(OSRExit(
279         UncountableInvalidation, JSValueSource(),
280         m_jit.graph().methodOfGettingAValueProfileFor(node),
281         this, m_stream->size()));
282     info.m_replacementSource = m_jit.watchpointLabel();
283     ASSERT(info.m_replacementSource.isSet());
284     noResult(node);
285 }
286
287 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
288 {
289     ASSERT(m_isCheckingArgumentTypes || m_canExit);
290     if (!m_compileOkay)
291         return;
292     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
293     m_compileOkay = false;
294     if (verboseCompilationEnabled())
295         dataLog("Bailing compilation.\n");
296 }
297
298 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
299 {
300     ASSERT(m_isCheckingArgumentTypes || m_canExit);
301     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
302 }
303
304 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
305 {
306     ASSERT(needsTypeCheck(edge, typesPassedThrough));
307     m_interpreter.filter(edge, typesPassedThrough);
308     speculationCheck(BadType, source, edge.node(), jumpToFail);
309 }
310
311 RegisterSet SpeculativeJIT::usedRegisters()
312 {
313     RegisterSet result;
314     
315     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
316         GPRReg gpr = GPRInfo::toRegister(i);
317         if (m_gprs.isInUse(gpr))
318             result.set(gpr);
319     }
320     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
321         FPRReg fpr = FPRInfo::toRegister(i);
322         if (m_fprs.isInUse(fpr))
323             result.set(fpr);
324     }
325     
326     result.merge(RegisterSet::specialRegisters());
327     
328     return result;
329 }
330
331 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
332 {
333     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
334 }
335
336 void SpeculativeJIT::runSlowPathGenerators()
337 {
338     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
339         m_slowPathGenerators[i]->generate(this);
340 }
341
342 // On Windows we need to wrap fmod; on other platforms we can call it directly.
343 // On ARMv7 we assert that all function pointers have the low bit set (they point to Thumb code).
344 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
345 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
346 {
347     return fmod(x, y);
348 }
349 #else
350 #define fmodAsDFGOperation fmod
351 #endif
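// The wrapper only forwards to fmod; for example fmodAsDFGOperation(5.5, 2.0)
// evaluates to 1.5 on either path. What it buys us is a callee with the
// JIT_OPERATION calling convention on stdcall targets and a Thumb function
// pointer with the low bit set on ARMv7, per the comment above. Elsewhere
// fmodAsDFGOperation is simply an alias for fmod.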
352
353 void SpeculativeJIT::clearGenerationInfo()
354 {
355     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
356         m_generationInfo[i] = GenerationInfo();
357     m_gprs = RegisterBank<GPRInfo>();
358     m_fprs = RegisterBank<FPRInfo>();
359 }
360
361 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
362 {
363     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
364     Node* node = info.node();
365     DataFormat registerFormat = info.registerFormat();
366     ASSERT(registerFormat != DataFormatNone);
367     ASSERT(registerFormat != DataFormatDouble);
368         
369     SilentSpillAction spillAction;
370     SilentFillAction fillAction;
371         
372     if (!info.needsSpill())
373         spillAction = DoNothingForSpill;
374     else {
375 #if USE(JSVALUE64)
376         ASSERT(info.gpr() == source);
377         if (registerFormat == DataFormatInt32)
378             spillAction = Store32Payload;
379         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
380             spillAction = StorePtr;
381         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
382             spillAction = Store64;
383         else {
384             ASSERT(registerFormat & DataFormatJS);
385             spillAction = Store64;
386         }
387 #elif USE(JSVALUE32_64)
388         if (registerFormat & DataFormatJS) {
389             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
390             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
391         } else {
392             ASSERT(info.gpr() == source);
393             spillAction = Store32Payload;
394         }
395 #endif
396     }
397         
398     if (registerFormat == DataFormatInt32) {
399         ASSERT(info.gpr() == source);
400         ASSERT(isJSInt32(info.registerFormat()));
401         if (node->hasConstant()) {
402             ASSERT(node->isInt32Constant());
403             fillAction = SetInt32Constant;
404         } else
405             fillAction = Load32Payload;
406     } else if (registerFormat == DataFormatBoolean) {
407 #if USE(JSVALUE64)
408         RELEASE_ASSERT_NOT_REACHED();
409 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
410         fillAction = DoNothingForFill;
411 #endif
412 #elif USE(JSVALUE32_64)
413         ASSERT(info.gpr() == source);
414         if (node->hasConstant()) {
415             ASSERT(node->isBooleanConstant());
416             fillAction = SetBooleanConstant;
417         } else
418             fillAction = Load32Payload;
419 #endif
420     } else if (registerFormat == DataFormatCell) {
421         ASSERT(info.gpr() == source);
422         if (node->hasConstant()) {
423             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
424             node->asCell(); // To get the assertion.
425             fillAction = SetCellConstant;
426         } else {
427 #if USE(JSVALUE64)
428             fillAction = LoadPtr;
429 #else
430             fillAction = Load32Payload;
431 #endif
432         }
433     } else if (registerFormat == DataFormatStorage) {
434         ASSERT(info.gpr() == source);
435         fillAction = LoadPtr;
436     } else if (registerFormat == DataFormatInt52) {
437         if (node->hasConstant())
438             fillAction = SetInt52Constant;
439         else if (info.spillFormat() == DataFormatInt52)
440             fillAction = Load64;
441         else if (info.spillFormat() == DataFormatStrictInt52)
442             fillAction = Load64ShiftInt52Left;
443         else if (info.spillFormat() == DataFormatNone)
444             fillAction = Load64;
445         else {
446             RELEASE_ASSERT_NOT_REACHED();
447 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
448             fillAction = Load64; // Make GCC happy.
449 #endif
450         }
451     } else if (registerFormat == DataFormatStrictInt52) {
452         if (node->hasConstant())
453             fillAction = SetStrictInt52Constant;
454         else if (info.spillFormat() == DataFormatInt52)
455             fillAction = Load64ShiftInt52Right;
456         else if (info.spillFormat() == DataFormatStrictInt52)
457             fillAction = Load64;
458         else if (info.spillFormat() == DataFormatNone)
459             fillAction = Load64;
460         else {
461             RELEASE_ASSERT_NOT_REACHED();
462 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
463             fillAction = Load64; // Make GCC happy.
464 #endif
465         }
466     } else {
467         ASSERT(registerFormat & DataFormatJS);
468 #if USE(JSVALUE64)
469         ASSERT(info.gpr() == source);
470         if (node->hasConstant()) {
471             if (node->isCellConstant())
472                 fillAction = SetTrustedJSConstant;
473             else
474                 fillAction = SetJSConstant;
475         } else if (info.spillFormat() == DataFormatInt32) {
476             ASSERT(registerFormat == DataFormatJSInt32);
477             fillAction = Load32PayloadBoxInt;
478         } else
479             fillAction = Load64;
480 #else
481         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
482         if (node->hasConstant())
483             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
484         else if (info.payloadGPR() == source)
485             fillAction = Load32Payload;
486         else { // Fill the Tag
487             switch (info.spillFormat()) {
488             case DataFormatInt32:
489                 ASSERT(registerFormat == DataFormatJSInt32);
490                 fillAction = SetInt32Tag;
491                 break;
492             case DataFormatCell:
493                 ASSERT(registerFormat == DataFormatJSCell);
494                 fillAction = SetCellTag;
495                 break;
496             case DataFormatBoolean:
497                 ASSERT(registerFormat == DataFormatJSBoolean);
498                 fillAction = SetBooleanTag;
499                 break;
500             default:
501                 fillAction = Load32Tag;
502                 break;
503             }
504         }
505 #endif
506     }
507         
508     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
509 }
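// A SilentRegisterSavePlan pairs one spill action and one fill action with the
// node and register. For example, a DataFormatInt32 value gets Store32Payload
// as its spill (when a spill is needed at all), and if the node is a constant
// it is refilled with SetInt32Constant -- rematerialized as an immediate --
// rather than reloaded with Load32Payload.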
510     
511 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
512 {
513     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
514     Node* node = info.node();
515     ASSERT(info.registerFormat() == DataFormatDouble);
516
517     SilentSpillAction spillAction;
518     SilentFillAction fillAction;
519         
520     if (!info.needsSpill())
521         spillAction = DoNothingForSpill;
522     else {
523         ASSERT(!node->hasConstant());
524         ASSERT(info.spillFormat() == DataFormatNone);
525         ASSERT(info.fpr() == source);
526         spillAction = StoreDouble;
527     }
528         
529 #if USE(JSVALUE64)
530     if (node->hasConstant()) {
531         node->asNumber(); // To get the assertion.
532         fillAction = SetDoubleConstant;
533     } else {
534         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
535         fillAction = LoadDouble;
536     }
537 #elif USE(JSVALUE32_64)
538     ASSERT(info.registerFormat() == DataFormatDouble);
539     if (node->hasConstant()) {
540         node->asNumber(); // To get the assertion.
541         fillAction = SetDoubleConstant;
542     } else
543         fillAction = LoadDouble;
544 #endif
545
546     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
547 }
548     
549 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
550 {
551     switch (plan.spillAction()) {
552     case DoNothingForSpill:
553         break;
554     case Store32Tag:
555         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
556         break;
557     case Store32Payload:
558         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
559         break;
560     case StorePtr:
561         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
562         break;
563 #if USE(JSVALUE64)
564     case Store64:
565         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
566         break;
567 #endif
568     case StoreDouble:
569         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
570         break;
571     default:
572         RELEASE_ASSERT_NOT_REACHED();
573     }
574 }
575     
576 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
577 {
578 #if USE(JSVALUE32_64)
579     UNUSED_PARAM(canTrample);
580 #endif
581     switch (plan.fillAction()) {
582     case DoNothingForFill:
583         break;
584     case SetInt32Constant:
585         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
586         break;
587 #if USE(JSVALUE64)
588     case SetInt52Constant:
589         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
590         break;
591     case SetStrictInt52Constant:
592         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
593         break;
594 #endif // USE(JSVALUE64)
595     case SetBooleanConstant:
596         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
597         break;
598     case SetCellConstant:
599         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
600         break;
601 #if USE(JSVALUE64)
602     case SetTrustedJSConstant:
603         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
604         break;
605     case SetJSConstant:
606         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
607         break;
608     case SetDoubleConstant:
609         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
610         m_jit.move64ToDouble(canTrample, plan.fpr());
611         break;
612     case Load32PayloadBoxInt:
613         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
614         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
615         break;
616     case Load32PayloadConvertToInt52:
617         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
618         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
619         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
620         break;
621     case Load32PayloadSignExtend:
622         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
623         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
624         break;
625 #else
626     case SetJSConstantTag:
627         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
628         break;
629     case SetJSConstantPayload:
630         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
631         break;
632     case SetInt32Tag:
633         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
634         break;
635     case SetCellTag:
636         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
637         break;
638     case SetBooleanTag:
639         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
640         break;
641     case SetDoubleConstant:
642         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
643         break;
644 #endif
645     case Load32Tag:
646         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
647         break;
648     case Load32Payload:
649         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
650         break;
651     case LoadPtr:
652         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
653         break;
654 #if USE(JSVALUE64)
655     case Load64:
656         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
657         break;
658     case Load64ShiftInt52Right:
659         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
660         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
661         break;
662     case Load64ShiftInt52Left:
663         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
664         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
665         break;
666 #endif
667     case LoadDouble:
668         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
669         break;
670     default:
671         RELEASE_ASSERT_NOT_REACHED();
672     }
673 }
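// Note on the Int52 cases above: in the JSVALUE64 representation,
// DataFormatInt52 keeps the value shifted left by JSValue::int52ShiftAmount
// while DataFormatStrictInt52 keeps it unshifted, so a fill may need to
// re-shift. Load64ShiftInt52Right reloads a slot spilled in the shifted form
// and converts it to the strict form; Load64ShiftInt52Left does the opposite.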
674     
675 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
676 {
677     switch (arrayMode.arrayClass()) {
678     case Array::OriginalArray: {
679         CRASH();
680 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
681         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
682         return result;
683 #endif
684     }
685         
686     case Array::Array:
687         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
688         return m_jit.branch32(
689             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
690         
691     case Array::NonArray:
692     case Array::OriginalNonArray:
693         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
694         return m_jit.branch32(
695             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
696         
697     case Array::PossiblyArray:
698         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
699         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
700     }
701     
702     RELEASE_ASSERT_NOT_REACHED();
703     return JITCompiler::Jump();
704 }
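// The indexing-type byte tested above packs the IsArray bit together with the
// shape bits, so the three classes differ only in which bits they compare:
// Array::Array requires IsArray | shape exactly, NonArray/OriginalNonArray
// require the shape with IsArray clear, and PossiblyArray masks IsArray away
// and checks only the shape.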
705
706 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
707 {
708     JITCompiler::JumpList result;
709     
710     switch (arrayMode.type()) {
711     case Array::Int32:
712         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
713
714     case Array::Double:
715         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
716
717     case Array::Contiguous:
718         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
719
720     case Array::Undecided:
721         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
722
723     case Array::ArrayStorage:
724     case Array::SlowPutArrayStorage: {
725         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
726         
727         if (arrayMode.isJSArray()) {
728             if (arrayMode.isSlowPut()) {
729                 result.append(
730                     m_jit.branchTest32(
731                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
732                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
733                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
734                 result.append(
735                     m_jit.branch32(
736                         MacroAssembler::Above, tempGPR,
737                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
738                 break;
739             }
740             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741             result.append(
742                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
743             break;
744         }
745         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
746         if (arrayMode.isSlowPut()) {
747             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
748             result.append(
749                 m_jit.branch32(
750                     MacroAssembler::Above, tempGPR,
751                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
752             break;
753         }
754         result.append(
755             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
756         break;
757     }
758     default:
759         CRASH();
760         break;
761     }
762     
763     return result;
764 }
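// The SlowPut cases above use a standard unsigned range check: after
// subtracting ArrayStorageShape, any shape in
// [ArrayStorageShape, SlowPutArrayStorageShape] maps to
// [0, SlowPutArrayStorageShape - ArrayStorageShape], so a single
// branch32(Above, ...) rejects everything outside that window, including
// smaller shapes that wrapped around to large unsigned values.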
765
766 void SpeculativeJIT::checkArray(Node* node)
767 {
768     ASSERT(node->arrayMode().isSpecific());
769     ASSERT(!node->arrayMode().doesConversion());
770     
771     SpeculateCellOperand base(this, node->child1());
772     GPRReg baseReg = base.gpr();
773     
774     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
775         noResult(m_currentNode);
776         return;
777     }
778     
779     const ClassInfo* expectedClassInfo = 0;
780     
781     switch (node->arrayMode().type()) {
782     case Array::String:
783         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
784         break;
785     case Array::Int32:
786     case Array::Double:
787     case Array::Contiguous:
788     case Array::Undecided:
789     case Array::ArrayStorage:
790     case Array::SlowPutArrayStorage: {
791         GPRTemporary temp(this);
792         GPRReg tempGPR = temp.gpr();
793         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
794         speculationCheck(
795             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
796             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
797         
798         noResult(m_currentNode);
799         return;
800     }
801     case Array::DirectArguments:
802         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
803         noResult(m_currentNode);
804         return;
805     case Array::ScopedArguments:
806         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
807         noResult(m_currentNode);
808         return;
809     default:
810         speculateCellTypeWithoutTypeFiltering(
811             node->child1(), baseReg,
812             typeForTypedArrayType(node->arrayMode().typedArrayType()));
813         noResult(m_currentNode);
814         return;
815     }
816     
817     RELEASE_ASSERT(expectedClassInfo);
818     
819     GPRTemporary temp(this);
820     GPRTemporary temp2(this);
821     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
822     speculationCheck(
823         BadType, JSValueSource::unboxedCell(baseReg), node,
824         m_jit.branchPtr(
825             MacroAssembler::NotEqual,
826             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
827             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
828     
829     noResult(m_currentNode);
830 }
831
832 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
833 {
834     ASSERT(node->arrayMode().doesConversion());
835     
836     GPRTemporary temp(this);
837     GPRTemporary structure;
838     GPRReg tempGPR = temp.gpr();
839     GPRReg structureGPR = InvalidGPRReg;
840     
841     if (node->op() != ArrayifyToStructure) {
842         GPRTemporary realStructure(this);
843         structure.adopt(realStructure);
844         structureGPR = structure.gpr();
845     }
846         
847     // We can skip all that comes next if we already have array storage.
848     MacroAssembler::JumpList slowPath;
849     
850     if (node->op() == ArrayifyToStructure) {
851         slowPath.append(m_jit.branchWeakStructure(
852             JITCompiler::NotEqual,
853             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
854             node->structure()));
855     } else {
856         m_jit.load8(
857             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
858         
859         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
860     }
861     
862     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
863         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
864     
865     noResult(m_currentNode);
866 }
867
868 void SpeculativeJIT::arrayify(Node* node)
869 {
870     ASSERT(node->arrayMode().isSpecific());
871     
872     SpeculateCellOperand base(this, node->child1());
873     
874     if (!node->child2()) {
875         arrayify(node, base.gpr(), InvalidGPRReg);
876         return;
877     }
878     
879     SpeculateInt32Operand property(this, node->child2());
880     
881     arrayify(node, base.gpr(), property.gpr());
882 }
883
884 GPRReg SpeculativeJIT::fillStorage(Edge edge)
885 {
886     VirtualRegister virtualRegister = edge->virtualRegister();
887     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
888     
889     switch (info.registerFormat()) {
890     case DataFormatNone: {
891         if (info.spillFormat() == DataFormatStorage) {
892             GPRReg gpr = allocate();
893             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
894             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
895             info.fillStorage(*m_stream, gpr);
896             return gpr;
897         }
898         
899         // Must be a cell; fill it as a cell and then return the pointer.
900         return fillSpeculateCell(edge);
901     }
902         
903     case DataFormatStorage: {
904         GPRReg gpr = info.gpr();
905         m_gprs.lock(gpr);
906         return gpr;
907     }
908         
909     default:
910         return fillSpeculateCell(edge);
911     }
912 }
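// In other words, fillStorage() handles three situations: a storage pointer
// spilled to the stack is reloaded with loadPtr and retained as
// SpillOrderSpilled, a value already live as DataFormatStorage is just locked
// in its register, and anything else (including an unspilled DataFormatNone)
// is treated as a cell and routed through fillSpeculateCell().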
913
914 void SpeculativeJIT::useChildren(Node* node)
915 {
916     if (node->flags() & NodeHasVarArgs) {
917         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
918             if (!!m_jit.graph().m_varArgChildren[childIdx])
919                 use(m_jit.graph().m_varArgChildren[childIdx]);
920         }
921     } else {
922         Edge child1 = node->child1();
923         if (!child1) {
924             ASSERT(!node->child2() && !node->child3());
925             return;
926         }
927         use(child1);
928         
929         Edge child2 = node->child2();
930         if (!child2) {
931             ASSERT(!node->child3());
932             return;
933         }
934         use(child2);
935         
936         Edge child3 = node->child3();
937         if (!child3)
938             return;
939         use(child3);
940     }
941 }
942
943 void SpeculativeJIT::compileIn(Node* node)
944 {
945     SpeculateCellOperand base(this, node->child2());
946     GPRReg baseGPR = base.gpr();
947     
948     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
949         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
950             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
951             
952             GPRTemporary result(this);
953             GPRReg resultGPR = result.gpr();
954
955             use(node->child1());
956             
957             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
958             MacroAssembler::Label done = m_jit.label();
959             
960             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
961             // we can cast it to const AtomicStringImpl* safely.
962             auto slowPath = slowPathCall(
963                 jump.m_jump, this, operationInOptimize,
964                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
965                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
966             
967             stubInfo->codeOrigin = node->origin.semantic;
968             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
969             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
970             stubInfo->patch.usedRegisters = usedRegisters();
971             stubInfo->patch.spillMode = NeedToSpill;
972
973             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
974             addSlowPathGenerator(WTF::move(slowPath));
975
976             base.use();
977
978             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
979             return;
980         }
981     }
982
983     JSValueOperand key(this, node->child1());
984     JSValueRegs regs = key.jsValueRegs();
985         
986     GPRFlushedCallResult result(this);
987     GPRReg resultGPR = result.gpr();
988         
989     base.use();
990     key.use();
991         
992     flushRegisters();
993     callOperation(
994         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
995         baseGPR, regs);
996     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
997 }
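// Summary of the two paths above: when the property is a constant JSString
// whose impl is already atomic, we plant a patchable jump plus an InRecord so
// the "in" can later be patched into a structure-based cache, with
// operationInOptimize as the initial slow path; otherwise both operands are
// flushed and the fully generic operationGenericIn is called.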
998
999 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1000 {
1001     unsigned branchIndexInBlock = detectPeepHoleBranch();
1002     if (branchIndexInBlock != UINT_MAX) {
1003         Node* branchNode = m_block->at(branchIndexInBlock);
1004
1005         ASSERT(node->adjustedRefCount() == 1);
1006         
1007         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1008     
1009         m_indexInBlock = branchIndexInBlock;
1010         m_currentNode = branchNode;
1011         
1012         return true;
1013     }
1014     
1015     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1016     
1017     return false;
1018 }
1019
1020 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1021 {
1022     unsigned branchIndexInBlock = detectPeepHoleBranch();
1023     if (branchIndexInBlock != UINT_MAX) {
1024         Node* branchNode = m_block->at(branchIndexInBlock);
1025
1026         ASSERT(node->adjustedRefCount() == 1);
1027         
1028         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1029     
1030         m_indexInBlock = branchIndexInBlock;
1031         m_currentNode = branchNode;
1032         
1033         return true;
1034     }
1035     
1036     nonSpeculativeNonPeepholeStrictEq(node, invert);
1037     
1038     return false;
1039 }
1040
1041 static const char* dataFormatString(DataFormat format)
1042 {
1043     // These values correspond to the DataFormat enum.
1044     const char* strings[] = {
1045         "[  ]",
1046         "[ i]",
1047         "[ d]",
1048         "[ c]",
1049         "Err!",
1050         "Err!",
1051         "Err!",
1052         "Err!",
1053         "[J ]",
1054         "[Ji]",
1055         "[Jd]",
1056         "[Jc]",
1057         "Err!",
1058         "Err!",
1059         "Err!",
1060         "Err!",
1061     };
1062     return strings[format];
1063 }
1064
1065 void SpeculativeJIT::dump(const char* label)
1066 {
1067     if (label)
1068         dataLogF("<%s>\n", label);
1069
1070     dataLogF("  gprs:\n");
1071     m_gprs.dump();
1072     dataLogF("  fprs:\n");
1073     m_fprs.dump();
1074     dataLogF("  VirtualRegisters:\n");
1075     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1076         GenerationInfo& info = m_generationInfo[i];
1077         if (info.alive())
1078             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1079         else
1080             dataLogF("    % 3d:[__][__]", i);
1081         if (info.registerFormat() == DataFormatDouble)
1082             dataLogF(":fpr%d\n", info.fpr());
1083         else if (info.registerFormat() != DataFormatNone
1084 #if USE(JSVALUE32_64)
1085             && !(info.registerFormat() & DataFormatJS)
1086 #endif
1087             ) {
1088             ASSERT(info.gpr() != InvalidGPRReg);
1089             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1090         } else
1091             dataLogF("\n");
1092     }
1093     if (label)
1094         dataLogF("</%s>\n", label);
1095 }
1096
1097 GPRTemporary::GPRTemporary()
1098     : m_jit(0)
1099     , m_gpr(InvalidGPRReg)
1100 {
1101 }
1102
1103 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1104     : m_jit(jit)
1105     , m_gpr(InvalidGPRReg)
1106 {
1107     m_gpr = m_jit->allocate();
1108 }
1109
1110 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1111     : m_jit(jit)
1112     , m_gpr(InvalidGPRReg)
1113 {
1114     m_gpr = m_jit->allocate(specific);
1115 }
1116
1117 #if USE(JSVALUE32_64)
1118 GPRTemporary::GPRTemporary(
1119     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1120     : m_jit(jit)
1121     , m_gpr(InvalidGPRReg)
1122 {
1123     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1124         m_gpr = m_jit->reuse(op1.gpr(which));
1125     else
1126         m_gpr = m_jit->allocate();
1127 }
1128 #endif // USE(JSVALUE32_64)
1129
1130 JSValueRegsTemporary::JSValueRegsTemporary() { }
1131
1132 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1133 #if USE(JSVALUE64)
1134     : m_gpr(jit)
1135 #else
1136     : m_payloadGPR(jit)
1137     , m_tagGPR(jit)
1138 #endif
1139 {
1140 }
1141
1142 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1143
1144 JSValueRegs JSValueRegsTemporary::regs()
1145 {
1146 #if USE(JSVALUE64)
1147     return JSValueRegs(m_gpr.gpr());
1148 #else
1149     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1150 #endif
1151 }
1152
1153 void GPRTemporary::adopt(GPRTemporary& other)
1154 {
1155     ASSERT(!m_jit);
1156     ASSERT(m_gpr == InvalidGPRReg);
1157     ASSERT(other.m_jit);
1158     ASSERT(other.m_gpr != InvalidGPRReg);
1159     m_jit = other.m_jit;
1160     m_gpr = other.m_gpr;
1161     other.m_jit = 0;
1162     other.m_gpr = InvalidGPRReg;
1163 }
1164
1165 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1166     : m_jit(jit)
1167     , m_fpr(InvalidFPRReg)
1168 {
1169     m_fpr = m_jit->fprAllocate();
1170 }
1171
1172 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1173     : m_jit(jit)
1174     , m_fpr(InvalidFPRReg)
1175 {
1176     if (m_jit->canReuse(op1.node()))
1177         m_fpr = m_jit->reuse(op1.fpr());
1178     else
1179         m_fpr = m_jit->fprAllocate();
1180 }
1181
1182 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1183     : m_jit(jit)
1184     , m_fpr(InvalidFPRReg)
1185 {
1186     if (m_jit->canReuse(op1.node()))
1187         m_fpr = m_jit->reuse(op1.fpr());
1188     else if (m_jit->canReuse(op2.node()))
1189         m_fpr = m_jit->reuse(op2.fpr());
1190     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1191         m_fpr = m_jit->reuse(op1.fpr());
1192     else
1193         m_fpr = m_jit->fprAllocate();
1194 }
1195
1196 #if USE(JSVALUE32_64)
1197 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1198     : m_jit(jit)
1199     , m_fpr(InvalidFPRReg)
1200 {
1201     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1202         m_fpr = m_jit->reuse(op1.fpr());
1203     else
1204         m_fpr = m_jit->fprAllocate();
1205 }
1206 #endif
1207
1208 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1209 {
1210     BasicBlock* taken = branchNode->branchData()->taken.block;
1211     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1212     
1213     SpeculateDoubleOperand op1(this, node->child1());
1214     SpeculateDoubleOperand op2(this, node->child2());
1215     
1216     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1217     jump(notTaken);
1218 }
1219
1220 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1221 {
1222     BasicBlock* taken = branchNode->branchData()->taken.block;
1223     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1224
1225     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1226     
1227     if (taken == nextBlock()) {
1228         condition = MacroAssembler::NotEqual;
1229         BasicBlock* tmp = taken;
1230         taken = notTaken;
1231         notTaken = tmp;
1232     }
1233
1234     SpeculateCellOperand op1(this, node->child1());
1235     SpeculateCellOperand op2(this, node->child2());
1236     
1237     GPRReg op1GPR = op1.gpr();
1238     GPRReg op2GPR = op2.gpr();
1239     
1240     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1241         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1242             speculationCheck(
1243                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1244         }
1245         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1246             speculationCheck(
1247                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1248         }
1249     } else {
1250         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1251             speculationCheck(
1252                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1253                 m_jit.branchIfNotObject(op1GPR));
1254         }
1255         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1256             m_jit.branchTest8(
1257                 MacroAssembler::NonZero, 
1258                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1259                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1260
1261         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1262             speculationCheck(
1263                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1264                 m_jit.branchIfNotObject(op2GPR));
1265         }
1266         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1267             m_jit.branchTest8(
1268                 MacroAssembler::NonZero, 
1269                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1270                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1271     }
1272
1273     branchPtr(condition, op1GPR, op2GPR, taken);
1274     jump(notTaken);
1275 }
1276
1277 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1278 {
1279     BasicBlock* taken = branchNode->branchData()->taken.block;
1280     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1281
1282     // The branch instruction will branch to the taken block.
1283     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1284     if (taken == nextBlock()) {
1285         condition = JITCompiler::invert(condition);
1286         BasicBlock* tmp = taken;
1287         taken = notTaken;
1288         notTaken = tmp;
1289     }
1290
1291     if (node->child1()->isBooleanConstant()) {
1292         bool imm = node->child1()->asBoolean();
1293         SpeculateBooleanOperand op2(this, node->child2());
1294         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1295     } else if (node->child2()->isBooleanConstant()) {
1296         SpeculateBooleanOperand op1(this, node->child1());
1297         bool imm = node->child2()->asBoolean();
1298         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1299     } else {
1300         SpeculateBooleanOperand op1(this, node->child1());
1301         SpeculateBooleanOperand op2(this, node->child2());
1302         branch32(condition, op1.gpr(), op2.gpr(), taken);
1303     }
1304
1305     jump(notTaken);
1306 }
1307
1308 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1309 {
1310     BasicBlock* taken = branchNode->branchData()->taken.block;
1311     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1312
1313     // The branch instruction will branch to the taken block.
1314     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1315     if (taken == nextBlock()) {
1316         condition = JITCompiler::invert(condition);
1317         BasicBlock* tmp = taken;
1318         taken = notTaken;
1319         notTaken = tmp;
1320     }
1321
1322     if (node->child1()->isInt32Constant()) {
1323         int32_t imm = node->child1()->asInt32();
1324         SpeculateInt32Operand op2(this, node->child2());
1325         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1326     } else if (node->child2()->isInt32Constant()) {
1327         SpeculateInt32Operand op1(this, node->child1());
1328         int32_t imm = node->child2()->asInt32();
1329         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1330     } else {
1331         SpeculateInt32Operand op1(this, node->child1());
1332         SpeculateInt32Operand op2(this, node->child2());
1333         branch32(condition, op1.gpr(), op2.gpr(), taken);
1334     }
1335
1336     jump(notTaken);
1337 }
1338
1339 // Returns true if the compare is fused with a subsequent branch.
1340 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1341 {
1342     // Fused compare & branch.
1343     unsigned branchIndexInBlock = detectPeepHoleBranch();
1344     if (branchIndexInBlock != UINT_MAX) {
1345         Node* branchNode = m_block->at(branchIndexInBlock);
1346
1347         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1348         // so there can be no intervening nodes that also reference the compare.
1349         ASSERT(node->adjustedRefCount() == 1);
1350
1351         if (node->isBinaryUseKind(Int32Use))
1352             compilePeepHoleInt32Branch(node, branchNode, condition);
1353 #if USE(JSVALUE64)
1354         else if (node->isBinaryUseKind(Int52RepUse))
1355             compilePeepHoleInt52Branch(node, branchNode, condition);
1356 #endif // USE(JSVALUE64)
1357         else if (node->isBinaryUseKind(DoubleRepUse))
1358             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1359         else if (node->op() == CompareEq) {
1360             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1361                 // Use non-peephole comparison, for now.
1362                 return false;
1363             }
1364             if (node->isBinaryUseKind(BooleanUse))
1365                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1366             else if (node->isBinaryUseKind(ObjectUse))
1367                 compilePeepHoleObjectEquality(node, branchNode);
1368             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1369                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1370             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1371                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1372             else if (!needsTypeCheck(node->child1(), SpecOther))
1373                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1374             else if (!needsTypeCheck(node->child2(), SpecOther))
1375                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1376             else {
1377                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1378                 return true;
1379             }
1380         } else {
1381             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1382             return true;
1383         }
1384
1385         use(node->child1());
1386         use(node->child2());
1387         m_indexInBlock = branchIndexInBlock;
1388         m_currentNode = branchNode;
1389         return true;
1390     }
1391     return false;
1392 }
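// When a compare is fused with the following Branch, the helpers above emit
// the branch themselves; the caller then marks both children as used and
// advances m_indexInBlock / m_currentNode to the Branch node so the main
// compile loop does not visit it again. The adjustedRefCount() == 1 assertion
// is what guarantees nothing else in the block still needs the compare's value.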
1393
1394 void SpeculativeJIT::noticeOSRBirth(Node* node)
1395 {
1396     if (!node->hasVirtualRegister())
1397         return;
1398     
1399     VirtualRegister virtualRegister = node->virtualRegister();
1400     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1401     
1402     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1403 }
1404
1405 void SpeculativeJIT::compileMovHint(Node* node)
1406 {
1407     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1408     
1409     Node* child = node->child1().node();
1410     noticeOSRBirth(child);
1411     
1412     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1413 }
1414
1415 void SpeculativeJIT::bail(AbortReason reason)
1416 {
1417     if (verboseCompilationEnabled())
1418         dataLog("Bailing compilation.\n");
1419     m_compileOkay = true;
1420     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1421     clearGenerationInfo();
1422 }
1423
1424 void SpeculativeJIT::compileCurrentBlock()
1425 {
1426     ASSERT(m_compileOkay);
1427     
1428     if (!m_block)
1429         return;
1430     
1431     ASSERT(m_block->isReachable);
1432     
1433     m_jit.blockHeads()[m_block->index] = m_jit.label();
1434
1435     if (!m_block->intersectionOfCFAHasVisited) {
1436         // Don't generate code for basic blocks that are unreachable according to CFA.
1437         // But to be sure that nobody has generated a jump to this block, drop in a
1438         // breakpoint here.
1439         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1440         return;
1441     }
1442
1443     m_stream->appendAndLog(VariableEvent::reset());
1444     
1445     m_jit.jitAssertHasValidCallFrame();
1446     m_jit.jitAssertTagsInPlace();
1447     m_jit.jitAssertArgumentCountSane();
1448
1449     m_state.reset();
1450     m_state.beginBasicBlock(m_block);
1451     
1452     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1453         int operand = m_block->variablesAtHead.operandForIndex(i);
1454         Node* node = m_block->variablesAtHead[i];
1455         if (!node)
1456             continue; // No need to record dead SetLocal's.
1457         
1458         VariableAccessData* variable = node->variableAccessData();
1459         DataFormat format;
1460         if (!node->refCount())
1461             continue; // No need to record dead SetLocal's.
1462         format = dataFormatFor(variable->flushFormat());
1463         m_stream->appendAndLog(
1464             VariableEvent::setLocal(
1465                 VirtualRegister(operand),
1466                 variable->machineLocal(),
1467                 format));
1468     }
1469     
1470     m_codeOriginForExitTarget = CodeOrigin();
1471     m_codeOriginForExitProfile = CodeOrigin();
1472     
1473     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1474         m_currentNode = m_block->at(m_indexInBlock);
1475         
1476         // We may have hit a contradiction that the CFA was aware of but that the JIT
1477         // didn't cause directly.
1478         if (!m_state.isValid()) {
1479             bail(DFGBailedAtTopOfBlock);
1480             return;
1481         }
1482
1483         if (ASSERT_DISABLED)
1484             m_canExit = true; // Essentially disable the assertions.
1485         else
1486             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1487         
1488         m_interpreter.startExecuting();
1489         m_jit.setForNode(m_currentNode);
1490         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1491         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1492         m_lastGeneratedNode = m_currentNode->op();
1493         
1494         ASSERT(m_currentNode->shouldGenerate());
1495         
1496         if (verboseCompilationEnabled()) {
1497             dataLogF(
1498                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1499                 (int)m_currentNode->index(),
1500                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1501             dataLog("\n");
1502         }
1503         
1504         compile(m_currentNode);
1505         
1506         if (belongsInMinifiedGraph(m_currentNode->op()))
1507             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1508         
1509 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1510         m_jit.clearRegisterAllocationOffsets();
1511 #endif
1512         
1513         if (!m_compileOkay) {
1514             bail(DFGBailedAtEndOfNode);
1515             return;
1516         }
1517         
1518         // Make sure that the abstract state is rematerialized for the next node.
1519         m_interpreter.executeEffects(m_indexInBlock);
1520     }
1521     
1522     // Perform the most basic verification that children have been used correctly.
1523     if (!ASSERT_DISABLED) {
1524         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1525             GenerationInfo& info = m_generationInfo[index];
1526             RELEASE_ASSERT(!info.alive());
1527         }
1528     }
1529 }
1530
1531 // If we are making type predictions about our arguments then
1532 // we need to check that they are correct on function entry.
1533 void SpeculativeJIT::checkArgumentTypes()
1534 {
1535     ASSERT(!m_currentNode);
1536     m_isCheckingArgumentTypes = true;
1537     m_codeOriginForExitTarget = CodeOrigin(0);
1538     m_codeOriginForExitProfile = CodeOrigin(0);
1539
1540     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1541         Node* node = m_jit.graph().m_arguments[i];
1542         if (!node) {
1543             // The argument is dead. We don't do any checks for such arguments.
1544             continue;
1545         }
1546         
1547         ASSERT(node->op() == SetArgument);
1548         ASSERT(node->shouldGenerate());
1549
1550         VariableAccessData* variableAccessData = node->variableAccessData();
1551         FlushFormat format = variableAccessData->flushFormat();
1552         
1553         if (format == FlushedJSValue)
1554             continue;
1555         
1556         VirtualRegister virtualRegister = variableAccessData->local();
1557
1558         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1559         
1560 #if USE(JSVALUE64)
1561         switch (format) {
1562         case FlushedInt32: {
1563             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1564             break;
1565         }
1566         case FlushedBoolean: {
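            // On 64-bit, false and true are encoded as ValueFalse and ValueFalse | 1, so
            // XORing with ValueFalse maps a boolean to 0 or 1. Any bits left outside the
            // low bit (the ~1 test below) mean the value was not a boolean.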
1567             GPRTemporary temp(this);
1568             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1569             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1570             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1571             break;
1572         }
1573         case FlushedCell: {
1574             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1575             break;
1576         }
1577         default:
1578             RELEASE_ASSERT_NOT_REACHED();
1579             break;
1580         }
1581 #else
1582         switch (format) {
1583         case FlushedInt32: {
1584             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1585             break;
1586         }
1587         case FlushedBoolean: {
1588             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1589             break;
1590         }
1591         case FlushedCell: {
1592             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1593             break;
1594         }
1595         default:
1596             RELEASE_ASSERT_NOT_REACHED();
1597             break;
1598         }
1599 #endif
1600     }
1601     m_isCheckingArgumentTypes = false;
1602 }
1603
1604 bool SpeculativeJIT::compile()
1605 {
1606     checkArgumentTypes();
1607     
1608     ASSERT(!m_currentNode);
1609     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1610         m_jit.setForBlockIndex(blockIndex);
1611         m_block = m_jit.graph().block(blockIndex);
1612         compileCurrentBlock();
1613     }
1614     linkBranches();
1615     return true;
1616 }
1617
1618 void SpeculativeJIT::createOSREntries()
1619 {
1620     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1621         BasicBlock* block = m_jit.graph().block(blockIndex);
1622         if (!block)
1623             continue;
1624         if (!block->isOSRTarget)
1625             continue;
1626         
1627         // Currently we don't have OSR entry trampolines. We could add them
1628         // here if need be.
1629         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1630     }
1631 }
1632
1633 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1634 {
1635     unsigned osrEntryIndex = 0;
1636     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1637         BasicBlock* block = m_jit.graph().block(blockIndex);
1638         if (!block)
1639             continue;
1640         if (!block->isOSRTarget)
1641             continue;
1642         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1643     }
1644     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1645     
1646     if (verboseCompilationEnabled()) {
1647         DumpContext dumpContext;
1648         dataLog("OSR Entries:\n");
1649         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1650             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1651         if (!dumpContext.isEmpty())
1652             dumpContext.dump(WTF::dataFile());
1653     }
1654 }
1655
1656 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1657 {
1658     Edge child3 = m_jit.graph().varArgChild(node, 2);
1659     Edge child4 = m_jit.graph().varArgChild(node, 3);
1660
1661     ArrayMode arrayMode = node->arrayMode();
1662     
1663     GPRReg baseReg = base.gpr();
1664     GPRReg propertyReg = property.gpr();
1665     
1666     SpeculateDoubleOperand value(this, child3);
1667
1668     FPRReg valueReg = value.fpr();
1669     
1670     DFG_TYPE_CHECK(
1671         JSValueRegs(), child3, SpecFullRealNumber,
1672         m_jit.branchDouble(
1673             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1674     
1675     if (!m_compileOkay)
1676         return;
1677     
1678     StorageOperand storage(this, child4);
1679     GPRReg storageReg = storage.gpr();
1680
1681     if (node->op() == PutByValAlias) {
1682         // Store the value to the array.
1683         GPRReg propertyReg = property.gpr();
1684         FPRReg valueReg = value.fpr();
1685         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1686         
1687         noResult(m_currentNode);
1688         return;
1689     }
1690     
1691     GPRTemporary temporary;
1692     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1693
1694     MacroAssembler::Jump slowCase;
1695     
1696     if (arrayMode.isInBounds()) {
1697         speculationCheck(
1698             OutOfBounds, JSValueRegs(), 0,
1699             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1700     } else {
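        // This mode allows stores at or past the public length. Indices already below the
        // public length skip ahead; indices beyond the vector length go to the slow path
        // (or OSR exit if the mode cannot store out of bounds); anything in between is
        // stored into the existing slack and the public length is bumped to index + 1.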
1701         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1702         
1703         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1704         
1705         if (!arrayMode.isOutOfBounds())
1706             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1707         
1708         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1709         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1710         
1711         inBounds.link(&m_jit);
1712     }
1713     
1714     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1715
1716     base.use();
1717     property.use();
1718     value.use();
1719     storage.use();
1720     
1721     if (arrayMode.isOutOfBounds()) {
1722         addSlowPathGenerator(
1723             slowPathCall(
1724                 slowCase, this,
1725                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1726                 NoResult, baseReg, propertyReg, valueReg));
1727     }
1728
1729     noResult(m_currentNode, UseChildrenCalledExplicitly);
1730 }
1731
1732 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1733 {
1734     SpeculateCellOperand string(this, node->child1());
1735     SpeculateStrictInt32Operand index(this, node->child2());
1736     StorageOperand storage(this, node->child3());
1737
1738     GPRReg stringReg = string.gpr();
1739     GPRReg indexReg = index.gpr();
1740     GPRReg storageReg = storage.gpr();
1741     
1742     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1743
1744     // unsigned comparison so we can filter out negative indices and indices that are too large
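    // (e.g. an index of -1 reads as 0xFFFFFFFF here, so it is always AboveOrEqual the length and exits)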
1745     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1746
1747     GPRTemporary scratch(this);
1748     GPRReg scratchReg = scratch.gpr();
1749
1750     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1751
1752     // Load the character into scratchReg
1753     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1754
1755     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1756     JITCompiler::Jump cont8Bit = m_jit.jump();
1757
1758     is16Bit.link(&m_jit);
1759
1760     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1761
1762     cont8Bit.link(&m_jit);
1763
1764     int32Result(scratchReg, m_currentNode);
1765 }
1766
1767 void SpeculativeJIT::compileGetByValOnString(Node* node)
1768 {
1769     SpeculateCellOperand base(this, node->child1());
1770     SpeculateStrictInt32Operand property(this, node->child2());
1771     StorageOperand storage(this, node->child3());
1772     GPRReg baseReg = base.gpr();
1773     GPRReg propertyReg = property.gpr();
1774     GPRReg storageReg = storage.gpr();
1775
1776     GPRTemporary scratch(this);
1777     GPRReg scratchReg = scratch.gpr();
1778 #if USE(JSVALUE32_64)
1779     GPRTemporary resultTag;
1780     GPRReg resultTagReg = InvalidGPRReg;
1781     if (node->arrayMode().isOutOfBounds()) {
1782         GPRTemporary realResultTag(this);
1783         resultTag.adopt(realResultTag);
1784         resultTagReg = resultTag.gpr();
1785     }
1786 #endif
1787
1788     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1789
1790     // unsigned comparison so we can filter out negative indices and indices that are too large
1791     JITCompiler::Jump outOfBounds = m_jit.branch32(
1792         MacroAssembler::AboveOrEqual, propertyReg,
1793         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1794     if (node->arrayMode().isInBounds())
1795         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1796
1797     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1798
1799     // Load the character into scratchReg
1800     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1801
1802     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1803     JITCompiler::Jump cont8Bit = m_jit.jump();
1804
1805     is16Bit.link(&m_jit);
1806
1807     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1808
1809     JITCompiler::Jump bigCharacter =
1810         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1811
1812     // 8 bit string values don't need the isASCII check.
1813     cont8Bit.link(&m_jit);
1814
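    // scratchReg now holds a character code below 0x100. Index into the VM's
    // single-character string cache: scale by sizeof(JSString*) (shift by 2 on 32-bit,
    // by 3 on 64-bit), add the table base, and load the cached JSString*.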
1815     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1816     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1817     m_jit.loadPtr(scratchReg, scratchReg);
1818
1819     addSlowPathGenerator(
1820         slowPathCall(
1821             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1822
1823     if (node->arrayMode().isOutOfBounds()) {
1824 #if USE(JSVALUE32_64)
1825         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1826 #endif
1827
1828         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1829         if (globalObject->stringPrototypeChainIsSane()) {
1830             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1831             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1832             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1833             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1834             // indexed properties either.
1835             // https://bugs.webkit.org/show_bug.cgi?id=144668
1836             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1837             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1838             
1839 #if USE(JSVALUE64)
1840             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1841                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1842 #else
1843             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1844                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1845                 baseReg, propertyReg));
1846 #endif
1847         } else {
1848 #if USE(JSVALUE64)
1849             addSlowPathGenerator(
1850                 slowPathCall(
1851                     outOfBounds, this, operationGetByValStringInt,
1852                     scratchReg, baseReg, propertyReg));
1853 #else
1854             addSlowPathGenerator(
1855                 slowPathCall(
1856                     outOfBounds, this, operationGetByValStringInt,
1857                     resultTagReg, scratchReg, baseReg, propertyReg));
1858 #endif
1859         }
1860         
1861 #if USE(JSVALUE64)
1862         jsValueResult(scratchReg, m_currentNode);
1863 #else
1864         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1865 #endif
1866     } else
1867         cellResult(scratchReg, m_currentNode);
1868 }
1869
1870 void SpeculativeJIT::compileFromCharCode(Node* node)
1871 {
1872     SpeculateStrictInt32Operand property(this, node->child1());
1873     GPRReg propertyReg = property.gpr();
1874     GPRTemporary smallStrings(this);
1875     GPRTemporary scratch(this);
1876     GPRReg scratchReg = scratch.gpr();
1877     GPRReg smallStringsReg = smallStrings.gpr();
1878
1879     JITCompiler::JumpList slowCases;
1880     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1881     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1882     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1883
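    // Char codes of 0xff and above, and cache entries that are still null, take the
    // slow path through operationStringFromCharCode.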
1884     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1885     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1886     cellResult(scratchReg, m_currentNode);
1887 }
1888
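// Decides how ValueToInt32 should consume its operand, based on the data format the
// operand currently has in the code generator: integer formats take the fast path,
// JSValue-like formats go through the generic conversion, and formats that contradict
// the speculation (an unboxed cell or boolean) terminate speculative execution.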
1889 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1890 {
1891     VirtualRegister virtualRegister = node->virtualRegister();
1892     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1893
1894     switch (info.registerFormat()) {
1895     case DataFormatStorage:
1896         RELEASE_ASSERT_NOT_REACHED();
1897
1898     case DataFormatBoolean:
1899     case DataFormatCell:
1900         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1901         return GeneratedOperandTypeUnknown;
1902
1903     case DataFormatNone:
1904     case DataFormatJSCell:
1905     case DataFormatJS:
1906     case DataFormatJSBoolean:
1907     case DataFormatJSDouble:
1908         return GeneratedOperandJSValue;
1909
1910     case DataFormatJSInt32:
1911     case DataFormatInt32:
1912         return GeneratedOperandInteger;
1913
1914     default:
1915         RELEASE_ASSERT_NOT_REACHED();
1916         return GeneratedOperandTypeUnknown;
1917     }
1918 }
1919
1920 void SpeculativeJIT::compileValueToInt32(Node* node)
1921 {
1922     switch (node->child1().useKind()) {
1923 #if USE(JSVALUE64)
1924     case Int52RepUse: {
1925         SpeculateStrictInt52Operand op1(this, node->child1());
1926         GPRTemporary result(this, Reuse, op1);
1927         GPRReg op1GPR = op1.gpr();
1928         GPRReg resultGPR = result.gpr();
1929         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1930         int32Result(resultGPR, node, DataFormatInt32);
1931         return;
1932     }
1933 #endif // USE(JSVALUE64)
1934         
1935     case DoubleRepUse: {
1936         GPRTemporary result(this);
1937         SpeculateDoubleOperand op1(this, node->child1());
1938         FPRReg fpr = op1.fpr();
1939         GPRReg gpr = result.gpr();
1940         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1941         
1942         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1943         
1944         int32Result(gpr, node);
1945         return;
1946     }
1947     
1948     case NumberUse:
1949     case NotCellUse: {
1950         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1951         case GeneratedOperandInteger: {
1952             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1953             GPRTemporary result(this, Reuse, op1);
1954             m_jit.move(op1.gpr(), result.gpr());
1955             int32Result(result.gpr(), node, op1.format());
1956             return;
1957         }
1958         case GeneratedOperandJSValue: {
1959             GPRTemporary result(this);
1960 #if USE(JSVALUE64)
1961             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1962
1963             GPRReg gpr = op1.gpr();
1964             GPRReg resultGpr = result.gpr();
1965             FPRTemporary tempFpr(this);
1966             FPRReg fpr = tempFpr.fpr();
1967
1968             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1969             JITCompiler::JumpList converted;
1970
1971             if (node->child1().useKind() == NumberUse) {
1972                 DFG_TYPE_CHECK(
1973                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1974                     m_jit.branchTest64(
1975                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1976             } else {
1977                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1978                 
1979                 DFG_TYPE_CHECK(
1980                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1981                 
1982                 // It's not a cell: so true turns into 1 and all else turns into 0.
1983                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1984                 converted.append(m_jit.jump());
1985                 
1986                 isNumber.link(&m_jit);
1987             }
1988
1989             // First, if we get here we have a double encoded as a JSValue
1990             m_jit.move(gpr, resultGpr);
1991             unboxDouble(resultGpr, fpr);
1992
1993             silentSpillAllRegisters(resultGpr);
1994             callOperation(toInt32, resultGpr, fpr);
1995             silentFillAllRegisters(resultGpr);
1996
1997             converted.append(m_jit.jump());
1998
1999             isInteger.link(&m_jit);
2000             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2001
2002             converted.link(&m_jit);
2003 #else
2004             Node* childNode = node->child1().node();
2005             VirtualRegister virtualRegister = childNode->virtualRegister();
2006             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2007
2008             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2009
2010             GPRReg payloadGPR = op1.payloadGPR();
2011             GPRReg resultGpr = result.gpr();
2012         
2013             JITCompiler::JumpList converted;
2014
2015             if (info.registerFormat() == DataFormatJSInt32)
2016                 m_jit.move(payloadGPR, resultGpr);
2017             else {
2018                 GPRReg tagGPR = op1.tagGPR();
2019                 FPRTemporary tempFpr(this);
2020                 FPRReg fpr = tempFpr.fpr();
2021                 FPRTemporary scratch(this);
2022
2023                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2024
2025                 if (node->child1().useKind() == NumberUse) {
2026                     DFG_TYPE_CHECK(
2027                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2028                         m_jit.branch32(
2029                             MacroAssembler::AboveOrEqual, tagGPR,
2030                             TrustedImm32(JSValue::LowestTag)));
2031                 } else {
2032                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2033                     
2034                     DFG_TYPE_CHECK(
2035                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2036                         m_jit.branchIfCell(op1.jsValueRegs()));
2037                     
2038                     // It's not a cell: so true turns into 1 and all else turns into 0.
2039                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2040                     m_jit.move(TrustedImm32(0), resultGpr);
2041                     converted.append(m_jit.jump());
2042                     
2043                     isBoolean.link(&m_jit);
2044                     m_jit.move(payloadGPR, resultGpr);
2045                     converted.append(m_jit.jump());
2046                     
2047                     isNumber.link(&m_jit);
2048                 }
2049
2050                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2051
2052                 silentSpillAllRegisters(resultGpr);
2053                 callOperation(toInt32, resultGpr, fpr);
2054                 silentFillAllRegisters(resultGpr);
2055
2056                 converted.append(m_jit.jump());
2057
2058                 isInteger.link(&m_jit);
2059                 m_jit.move(payloadGPR, resultGpr);
2060
2061                 converted.link(&m_jit);
2062             }
2063 #endif
2064             int32Result(resultGpr, node);
2065             return;
2066         }
2067         case GeneratedOperandTypeUnknown:
2068             RELEASE_ASSERT(!m_compileOkay);
2069             return;
2070         }
2071         RELEASE_ASSERT_NOT_REACHED();
2072         return;
2073     }
2074     
2075     default:
2076         ASSERT(!m_compileOkay);
2077         return;
2078     }
2079 }
2080
2081 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2082 {
2083     if (doesOverflow(node->arithMode())) {
2084         // We know that this sometimes produces doubles. So produce a double every
2085         // time. This at least allows subsequent code to not have weird conditionals.
2086             
2087         SpeculateInt32Operand op1(this, node->child1());
2088         FPRTemporary result(this);
2089             
2090         GPRReg inputGPR = op1.gpr();
2091         FPRReg outputFPR = result.fpr();
2092             
2093         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2094             
2095         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2096         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2097         positive.link(&m_jit);
2098             
2099         doubleResult(outputFPR, node);
2100         return;
2101     }
2102     
2103     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2104
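    // Otherwise speculate that the uint32 value fits in int32: if its sign bit is set it
    // does not, so OSR exit on a negative (signed) result.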
2105     SpeculateInt32Operand op1(this, node->child1());
2106     GPRTemporary result(this);
2107
2108     m_jit.move(op1.gpr(), result.gpr());
2109
2110     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2111
2112     int32Result(result.gpr(), node, op1.format());
2113 }
2114
2115 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2116 {
2117     SpeculateDoubleOperand op1(this, node->child1());
2118     FPRTemporary scratch(this);
2119     GPRTemporary result(this);
2120     
2121     FPRReg valueFPR = op1.fpr();
2122     FPRReg scratchFPR = scratch.fpr();
2123     GPRReg resultGPR = result.gpr();
2124
2125     JITCompiler::JumpList failureCases;
2126     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2127     m_jit.branchConvertDoubleToInt32(
2128         valueFPR, resultGPR, failureCases, scratchFPR,
2129         shouldCheckNegativeZero(node->arithMode()));
2130     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2131
2132     int32Result(resultGPR, node);
2133 }
2134
2135 void SpeculativeJIT::compileDoubleRep(Node* node)
2136 {
2137     switch (node->child1().useKind()) {
2138     case RealNumberUse: {
2139         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2140         FPRTemporary result(this);
2141         
2142         JSValueRegs op1Regs = op1.jsValueRegs();
2143         FPRReg resultFPR = result.fpr();
2144         
2145 #if USE(JSVALUE64)
2146         GPRTemporary temp(this);
2147         GPRReg tempGPR = temp.gpr();
2148         m_jit.move(op1Regs.gpr(), tempGPR);
2149         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2150 #else
2151         FPRTemporary temp(this);
2152         FPRReg tempFPR = temp.fpr();
2153         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2154 #endif
2155         
2156         JITCompiler::Jump done = m_jit.branchDouble(
2157             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2158         
2159         DFG_TYPE_CHECK(
2160             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2161         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2162         
2163         done.link(&m_jit);
2164         
2165         doubleResult(resultFPR, node);
2166         return;
2167     }
2168     
2169     case NotCellUse:
2170     case NumberUse: {
2171         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2172
2173         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2174         if (isInt32Speculation(possibleTypes)) {
2175             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2176             FPRTemporary result(this);
2177             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2178             doubleResult(result.fpr(), node);
2179             return;
2180         }
2181
2182         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2183         FPRTemporary result(this);
2184
2185 #if USE(JSVALUE64)
2186         GPRTemporary temp(this);
2187
2188         GPRReg op1GPR = op1.gpr();
2189         GPRReg tempGPR = temp.gpr();
2190         FPRReg resultFPR = result.fpr();
2191         JITCompiler::JumpList done;
2192
2193         JITCompiler::Jump isInteger = m_jit.branch64(
2194             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2195
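        // NotCellUse also converts the remaining non-cell primitives in place: null and
        // false produce 0, true produces 1, and undefined produces NaN. A cell fails the
        // type check below and OSR exits.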
2196         if (node->child1().useKind() == NotCellUse) {
2197             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2198             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2199
2200             static const double zero = 0;
2201             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2202
2203             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2204             done.append(isNull);
2205
             // Only booleans and cells remain at this point; exit if the value is a cell.
2206             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2207                 m_jit.branchIfCell(JSValueRegs(op1GPR)));
2208
2209             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2210             static const double one = 1;
2211             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
             done.append(m_jit.jump()); // true: result is 1; skip the undefined/NaN case below.
2212             done.append(isFalse); // false: result is the 0 loaded above.
2213
2214             isUndefined.link(&m_jit);
2215             static const double NaN = PNaN;
2216             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2217             done.append(m_jit.jump());
2218
2219             isNumber.link(&m_jit);
2220         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2221             typeCheck(
2222                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2223                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2224         }
2225     
2226         m_jit.move(op1GPR, tempGPR);
2227         unboxDouble(tempGPR, resultFPR);
2228         done.append(m_jit.jump());
2229     
2230         isInteger.link(&m_jit);
2231         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2232         done.link(&m_jit);
2233 #else // USE(JSVALUE64) -> this is the 32_64 case
2234         FPRTemporary temp(this);
2235     
2236         GPRReg op1TagGPR = op1.tagGPR();
2237         GPRReg op1PayloadGPR = op1.payloadGPR();
2238         FPRReg tempFPR = temp.fpr();
2239         FPRReg resultFPR = result.fpr();
2240         JITCompiler::JumpList done;
2241     
2242         JITCompiler::Jump isInteger = m_jit.branch32(
2243             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2244
2245         if (node->child1().useKind() == NotCellUse) {
2246             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2247             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2248
2249             static const double zero = 0;
2250             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2251
2252             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2253             done.append(isNull);
2254
2255             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2256
2257             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2258             static const double one = 1;
2259             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
             done.append(m_jit.jump()); // true: result is 1; skip the undefined/NaN case below.
2260             done.append(isFalse); // false: result is the 0 loaded above.
2261
2262             isUndefined.link(&m_jit);
2263             static const double NaN = PNaN;
2264             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2265             done.append(m_jit.jump());
2266
2267             isNumber.link(&m_jit);
2268         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2269             typeCheck(
2270                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2271                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2272         }
2273
2274         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2275         done.append(m_jit.jump());
2276     
2277         isInteger.link(&m_jit);
2278         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2279         done.link(&m_jit);
2280 #endif // USE(JSVALUE64)
2281     
2282         doubleResult(resultFPR, node);
2283         return;
2284     }
2285         
2286 #if USE(JSVALUE64)
2287     case Int52RepUse: {
2288         SpeculateStrictInt52Operand value(this, node->child1());
2289         FPRTemporary result(this);
2290         
2291         GPRReg valueGPR = value.gpr();
2292         FPRReg resultFPR = result.fpr();
2293
2294         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2295         
2296         doubleResult(resultFPR, node);
2297         return;
2298     }
2299 #endif // USE(JSVALUE64)
2300         
2301     default:
2302         RELEASE_ASSERT_NOT_REACHED();
2303         return;
2304     }
2305 }
2306
2307 void SpeculativeJIT::compileValueRep(Node* node)
2308 {
2309     switch (node->child1().useKind()) {
2310     case DoubleRepUse: {
2311         SpeculateDoubleOperand value(this, node->child1());
2312         JSValueRegsTemporary result(this);
2313         
2314         FPRReg valueFPR = value.fpr();
2315         JSValueRegs resultRegs = result.regs();
2316         
2317         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2318         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2319         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2320         // local was purified.
2321         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2322             m_jit.purifyNaN(valueFPR);
2323
2324         boxDouble(valueFPR, resultRegs);
2325         
2326         jsValueResult(resultRegs, node);
2327         return;
2328     }
2329         
2330 #if USE(JSVALUE64)
2331     case Int52RepUse: {
2332         SpeculateStrictInt52Operand value(this, node->child1());
2333         GPRTemporary result(this);
2334         
2335         GPRReg valueGPR = value.gpr();
2336         GPRReg resultGPR = result.gpr();
2337         
2338         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2339         
2340         jsValueResult(resultGPR, node);
2341         return;
2342     }
2343 #endif // USE(JSVALUE64)
2344         
2345     default:
2346         RELEASE_ASSERT_NOT_REACHED();
2347         return;
2348     }
2349 }
2350
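// Clamps a double to the byte range [0, 255]. Adding 0.5 up front makes the caller's
// subsequent truncation round to nearest; NaN fails the (d > 0) test and clamps to 0.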
2351 static double clampDoubleToByte(double d)
2352 {
2353     d += 0.5;
2354     if (!(d > 0))
2355         d = 0;
2356     else if (d > 255)
2357         d = 255;
2358     return d;
2359 }
2360
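// Clamps an int32 in place to [0, 255]: the unsigned BelowOrEqual check passes values
// already in range (negatives read as large unsigned values and fail it), values above
// 0xff become 255, and negatives become 0.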
2361 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2362 {
2363     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2364     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2365     jit.xorPtr(result, result);
2366     MacroAssembler::Jump clamped = jit.jump();
2367     tooBig.link(&jit);
2368     jit.move(JITCompiler::TrustedImm32(255), result);
2369     clamped.link(&jit);
2370     inBounds.link(&jit);
2371 }
2372
2373 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2374 {
2375     // Unordered compare so we pick up NaN
2376     static const double zero = 0;
2377     static const double byteMax = 255;
2378     static const double half = 0.5;
2379     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2380     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2381     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2382     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2383     
2384     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2385     // FIXME: This should probably just use a floating point round!
2386     // https://bugs.webkit.org/show_bug.cgi?id=72054
2387     jit.addDouble(source, scratch);
2388     jit.truncateDoubleToInt32(scratch, result);   
2389     MacroAssembler::Jump truncatedInt = jit.jump();
2390     
2391     tooSmall.link(&jit);
2392     jit.xorPtr(result, result);
2393     MacroAssembler::Jump zeroed = jit.jump();
2394     
2395     tooBig.link(&jit);
2396     jit.move(JITCompiler::TrustedImm32(255), result);
2397     
2398     truncatedInt.link(&jit);
2399     zeroed.link(&jit);
2400
2401 }
2402
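// Returns an unset jump when no bounds check is needed: PutByValAlias relies on checks
// already emitted for the access it aliases, and a constant index against a view whose
// length the graph can fold is validated statically.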
2403 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2404 {
2405     if (node->op() == PutByValAlias)
2406         return JITCompiler::Jump();
2407     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2408         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2409     if (view) {
2410         uint32_t length = view->length();
2411         Node* indexNode = m_jit.graph().child(node, 1).node();
2412         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2413             return JITCompiler::Jump();
2414         return m_jit.branch32(
2415             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2416     }
2417     return m_jit.branch32(
2418         MacroAssembler::AboveOrEqual, indexGPR,
2419         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2420 }
2421
2422 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2423 {
2424     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2425     if (!jump.isSet())
2426         return;
2427     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2428 }
2429
2430 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2431 {
2432     ASSERT(isInt(type));
2433     
2434     SpeculateCellOperand base(this, node->child1());
2435     SpeculateStrictInt32Operand property(this, node->child2());
2436     StorageOperand storage(this, node->child3());
2437
2438     GPRReg baseReg = base.gpr();
2439     GPRReg propertyReg = property.gpr();
2440     GPRReg storageReg = storage.gpr();
2441
2442     GPRTemporary result(this);
2443     GPRReg resultReg = result.gpr();
2444
2445     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2446
2447     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2448     switch (elementSize(type)) {
2449     case 1:
2450         if (isSigned(type))
2451             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2452         else
2453             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2454         break;
2455     case 2:
2456         if (isSigned(type))
2457             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2458         else
2459             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2460         break;
2461     case 4:
2462         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2463         break;
2464     default:
2465         CRASH();
2466     }
2467     if (elementSize(type) < 4 || isSigned(type)) {
2468         int32Result(resultReg, node);
2469         return;
2470     }
2471     
2472     ASSERT(elementSize(type) == 4 && !isSigned(type));
2473     if (node->shouldSpeculateInt32()) {
2474         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2475         int32Result(resultReg, node);
2476         return;
2477     }
2478     
2479 #if USE(JSVALUE64)
2480     if (node->shouldSpeculateMachineInt()) {
2481         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2482         strictInt52Result(resultReg, node);
2483         return;
2484     }
2485 #endif
2486     
2487     FPRTemporary fresult(this);
2488     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2489     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2490     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2491     positive.link(&m_jit);
2492     doubleResult(fresult.fpr(), node);
2493 }
2494
2495 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2496 {
2497     ASSERT(isInt(type));
2498     
2499     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2500     GPRReg storageReg = storage.gpr();
2501     
2502     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2503     
2504     GPRTemporary value;
2505     GPRReg valueGPR = InvalidGPRReg;
2506     
2507     if (valueUse->isConstant()) {
2508         JSValue jsValue = valueUse->asJSValue();
2509         if (!jsValue.isNumber()) {
2510             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2511             noResult(node);
2512             return;
2513         }
2514         double d = jsValue.asNumber();
2515         if (isClamped(type)) {
2516             ASSERT(elementSize(type) == 1);
2517             d = clampDoubleToByte(d);
2518         }
2519         GPRTemporary scratch(this);
2520         GPRReg scratchReg = scratch.gpr();
2521         m_jit.move(Imm32(toInt32(d)), scratchReg);
2522         value.adopt(scratch);
2523         valueGPR = scratchReg;
2524     } else {
2525         switch (valueUse.useKind()) {
2526         case Int32Use: {
2527             SpeculateInt32Operand valueOp(this, valueUse);
2528             GPRTemporary scratch(this);
2529             GPRReg scratchReg = scratch.gpr();
2530             m_jit.move(valueOp.gpr(), scratchReg);
2531             if (isClamped(type)) {
2532                 ASSERT(elementSize(type) == 1);
2533                 compileClampIntegerToByte(m_jit, scratchReg);
2534             }
2535             value.adopt(scratch);
2536             valueGPR = scratchReg;
2537             break;
2538         }
2539             
2540 #if USE(JSVALUE64)
2541         case Int52RepUse: {
2542             SpeculateStrictInt52Operand valueOp(this, valueUse);
2543             GPRTemporary scratch(this);
2544             GPRReg scratchReg = scratch.gpr();
2545             m_jit.move(valueOp.gpr(), scratchReg);
2546             if (isClamped(type)) {
2547                 ASSERT(elementSize(type) == 1);
2548                 MacroAssembler::Jump inBounds = m_jit.branch64(
2549                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2550                 MacroAssembler::Jump tooBig = m_jit.branch64(
2551                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2552                 m_jit.move(TrustedImm32(0), scratchReg);
2553                 MacroAssembler::Jump clamped = m_jit.jump();
2554                 tooBig.link(&m_jit);
2555                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2556                 clamped.link(&m_jit);
2557                 inBounds.link(&m_jit);
2558             }
2559             value.adopt(scratch);
2560             valueGPR = scratchReg;
2561             break;
2562         }
2563 #endif // USE(JSVALUE64)
2564             
2565         case DoubleRepUse: {
2566             if (isClamped(type)) {
2567                 ASSERT(elementSize(type) == 1);
2568                 SpeculateDoubleOperand valueOp(this, valueUse);
2569                 GPRTemporary result(this);
2570                 FPRTemporary floatScratch(this);
2571                 FPRReg fpr = valueOp.fpr();
2572                 GPRReg gpr = result.gpr();
2573                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2574                 value.adopt(result);
2575                 valueGPR = gpr;
2576             } else {
2577                 SpeculateDoubleOperand valueOp(this, valueUse);
2578                 GPRTemporary result(this);
2579                 FPRReg fpr = valueOp.fpr();
2580                 GPRReg gpr = result.gpr();
2581                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2582                 m_jit.xorPtr(gpr, gpr);
2583                 MacroAssembler::Jump fixed = m_jit.jump();
2584                 notNaN.link(&m_jit);
2585                 
2586                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2587                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2588                 
2589                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2590                 
2591                 fixed.link(&m_jit);
2592                 value.adopt(result);
2593                 valueGPR = gpr;
2594             }
2595             break;
2596         }
2597             
2598         default:
2599             RELEASE_ASSERT_NOT_REACHED();
2600             break;
2601         }
2602     }
2603     
2604     ASSERT_UNUSED(valueGPR, valueGPR != property);
2605     ASSERT(valueGPR != base);
2606     ASSERT(valueGPR != storageReg);
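    // For in-bounds array modes an out-of-bounds index is an OSR exit; otherwise the
    // store below is simply skipped when the index is past the typed array's length.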
2607     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2608     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2609         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2610         outOfBounds = MacroAssembler::Jump();
2611     }
2612
2613     switch (elementSize(type)) {
2614     case 1:
2615         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2616         break;
2617     case 2:
2618         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2619         break;
2620     case 4:
2621         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2622         break;
2623     default:
2624         CRASH();
2625     }
2626     if (outOfBounds.isSet())
2627         outOfBounds.link(&m_jit);
2628     noResult(node);
2629 }
2630
2631 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2632 {
2633     ASSERT(isFloat(type));
2634     
2635     SpeculateCellOperand base(this, node->child1());
2636     SpeculateStrictInt32Operand property(this, node->child2());
2637     StorageOperand storage(this, node->child3());
2638
2639     GPRReg baseReg = base.gpr();
2640     GPRReg propertyReg = property.gpr();
2641     GPRReg storageReg = storage.gpr();
2642
2643     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2644
2645     FPRTemporary result(this);
2646     FPRReg resultReg = result.fpr();
2647     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2648     switch (elementSize(type)) {
2649     case 4:
2650         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2651         m_jit.convertFloatToDouble(resultReg, resultReg);
2652         break;
2653     case 8: {
2654         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2655         break;
2656     }
2657     default:
2658         RELEASE_ASSERT_NOT_REACHED();
2659     }
2660     
2661     doubleResult(resultReg, node);
2662 }
2663
2664 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2665 {
2666     ASSERT(isFloat(type));
2667     
2668     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2669     GPRReg storageReg = storage.gpr();
2670     
2671     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2672     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2673
2674     SpeculateDoubleOperand valueOp(this, valueUse);
2675     FPRTemporary scratch(this);
2676     FPRReg valueFPR = valueOp.fpr();
2677     FPRReg scratchFPR = scratch.fpr();
2678
2679     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2680     
2681     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2682     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2683         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2684         outOfBounds = MacroAssembler::Jump();
2685     }
2686     
2687     switch (elementSize(type)) {
2688     case 4: {
2689         m_jit.moveDouble(valueFPR, scratchFPR);
2690         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2691         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2692         break;
2693     }
2694     case 8:
2695         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2696         break;
2697     default:
2698         RELEASE_ASSERT_NOT_REACHED();
2699     }
2700     if (outOfBounds.isSet())
2701         outOfBounds.link(&m_jit);
2702     noResult(node);
2703 }
2704
2705 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2706 {
2707     // Check that prototype is an object.
2708     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2709     
2710     // Initialize scratchReg with the value being checked.
2711     m_jit.move(valueReg, scratchReg);
2712     
2713     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2714     MacroAssembler::Label loop(&m_jit);
2715     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2716     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2717     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2718 #if USE(JSVALUE64)
2719     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2720 #else
2721     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2722 #endif
2723     
2724     // No match - result is false.
2725 #if USE(JSVALUE64)
2726     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2727 #else
2728     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2729 #endif
2730     MacroAssembler::Jump putResult = m_jit.jump();
2731     
2732     isInstance.link(&m_jit);
2733 #if USE(JSVALUE64)
2734     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2735 #else
2736     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2737 #endif
2738     
2739     putResult.link(&m_jit);
2740 }
2741
2742 void SpeculativeJIT::compileInstanceOf(Node* node)
2743 {
2744     if (node->child1().useKind() == UntypedUse) {
2745         // It might not be a cell. Speculate less aggressively.
2746         // Or: it might only be used once (i.e. by us), so we get zero benefit
2747         // from speculating any more aggressively than we absolutely need to.
2748         
2749         JSValueOperand value(this, node->child1());
2750         SpeculateCellOperand prototype(this, node->child2());
2751         GPRTemporary scratch(this);
2752         GPRTemporary scratch2(this);
2753         
2754         GPRReg prototypeReg = prototype.gpr();
2755         GPRReg scratchReg = scratch.gpr();
2756         GPRReg scratch2Reg = scratch2.gpr();
2757         
2758         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2759         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2760         moveFalseTo(scratchReg);
2761
2762         MacroAssembler::Jump done = m_jit.jump();
2763         
2764         isCell.link(&m_jit);
2765         
2766         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2767         
2768         done.link(&m_jit);
2769
2770         blessedBooleanResult(scratchReg, node);
2771         return;
2772     }
2773     
2774     SpeculateCellOperand value(this, node->child1());
2775     SpeculateCellOperand prototype(this, node->child2());
2776     
2777     GPRTemporary scratch(this);
2778     GPRTemporary scratch2(this);
2779     
2780     GPRReg valueReg = value.gpr();
2781     GPRReg prototypeReg = prototype.gpr();
2782     GPRReg scratchReg = scratch.gpr();
2783     GPRReg scratch2Reg = scratch2.gpr();
2784     
2785     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2786
2787     blessedBooleanResult(scratchReg, node);
2788 }
2789
2790 void SpeculativeJIT::compileAdd(Node* node)
2791 {
2792     switch (node->binaryUseKind()) {
2793     case Int32Use: {
2794         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2795         
2796         if (node->child1()->isInt32Constant()) {
2797             int32_t imm1 = node->child1()->asInt32();
2798             SpeculateInt32Operand op2(this, node->child2());
2799             GPRTemporary result(this);
2800
2801             if (!shouldCheckOverflow(node->arithMode())) {
2802                 m_jit.move(op2.gpr(), result.gpr());
2803                 m_jit.add32(Imm32(imm1), result.gpr());
2804             } else
2805                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2806
2807             int32Result(result.gpr(), node);
2808             return;
2809         }
2810         
2811         if (node->child2()->isInt32Constant()) {
2812             SpeculateInt32Operand op1(this, node->child1());
2813             int32_t imm2 = node->child2()->asInt32();
2814             GPRTemporary result(this);
2815                 
2816             if (!shouldCheckOverflow(node->arithMode())) {
2817                 m_jit.move(op1.gpr(), result.gpr());
2818                 m_jit.add32(Imm32(imm2), result.gpr());
2819             } else
2820                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2821
2822             int32Result(result.gpr(), node);
2823             return;
2824         }
2825                 
2826         SpeculateInt32Operand op1(this, node->child1());
2827         SpeculateInt32Operand op2(this, node->child2());
2828         GPRTemporary result(this, Reuse, op1, op2);
2829
2830         GPRReg gpr1 = op1.gpr();
2831         GPRReg gpr2 = op2.gpr();
2832         GPRReg gprResult = result.gpr();
2833
2834         if (!shouldCheckOverflow(node->arithMode())) {
2835             if (gpr1 == gprResult)
2836                 m_jit.add32(gpr2, gprResult);
2837             else {
2838                 m_jit.move(gpr2, gprResult);
2839                 m_jit.add32(gpr1, gprResult);
2840             }
2841         } else {
2842             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2843                 
2844             if (gpr1 == gprResult)
2845                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2846             else if (gpr2 == gprResult)
2847                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2848             else
2849                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2850         }
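        // The SpeculationRecovery matters when the checked add has already clobbered
        // one of its operands (gprResult aliases gpr1 or gpr2): on OSR exit the
        // recovery subtracts the other operand back out, reconstructing the original value.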
2851
2852         int32Result(gprResult, node);
2853         return;
2854     }
2855         
2856 #if USE(JSVALUE64)
2857     case Int52RepUse: {
2858         ASSERT(shouldCheckOverflow(node->arithMode()));
2859         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2860
2861         // Will we need an overflow check? If we can prove that neither input can be a
2862         // full Int52 (both fit in 32 bits), the sum fits easily in 52 bits and no check is needed.
2863         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2864             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2865             SpeculateWhicheverInt52Operand op1(this, node->child1());
2866             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2867             GPRTemporary result(this, Reuse, op1);
2868             m_jit.move(op1.gpr(), result.gpr());
2869             m_jit.add64(op2.gpr(), result.gpr());
2870             int52Result(result.gpr(), node, op1.format());
2871             return;
2872         }
2873         
2874         SpeculateInt52Operand op1(this, node->child1());
2875         SpeculateInt52Operand op2(this, node->child2());
2876         GPRTemporary result(this);
2877         m_jit.move(op1.gpr(), result.gpr());
2878         speculationCheck(
2879             Int52Overflow, JSValueRegs(), 0,
2880             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2881         int52Result(result.gpr(), node);
2882         return;
2883     }
2884 #endif // USE(JSVALUE64)
2885     
2886     case DoubleRepUse: {
2887         SpeculateDoubleOperand op1(this, node->child1());
2888         SpeculateDoubleOperand op2(this, node->child2());
2889         FPRTemporary result(this, op1, op2);
2890
2891         FPRReg reg1 = op1.fpr();
2892         FPRReg reg2 = op2.fpr();
2893         m_jit.addDouble(reg1, reg2, result.fpr());
2894
2895         doubleResult(result.fpr(), node);
2896         return;
2897     }
2898         
2899     default:
2900         RELEASE_ASSERT_NOT_REACHED();
2901         break;
2902     }
2903 }
2904
2905 void SpeculativeJIT::compileMakeRope(Node* node)
2906 {
2907     ASSERT(node->child1().useKind() == KnownStringUse);
2908     ASSERT(node->child2().useKind() == KnownStringUse);
2909     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2910     
2911     SpeculateCellOperand op1(this, node->child1());
2912     SpeculateCellOperand op2(this, node->child2());
2913     SpeculateCellOperand op3(this, node->child3());
2914     GPRTemporary result(this);
2915     GPRTemporary allocator(this);
2916     GPRTemporary scratch(this);
2917     
2918     GPRReg opGPRs[3];
2919     unsigned numOpGPRs;
2920     opGPRs[0] = op1.gpr();
2921     opGPRs[1] = op2.gpr();
2922     if (node->child3()) {
2923         opGPRs[2] = op3.gpr();
2924         numOpGPRs = 3;
2925     } else {
2926         opGPRs[2] = InvalidGPRReg;
2927         numOpGPRs = 2;
2928     }
2929     GPRReg resultGPR = result.gpr();
2930     GPRReg allocatorGPR = allocator.gpr();
2931     GPRReg scratchGPR = scratch.gpr();
2932     
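    // Fast path: allocate a JSRopeString and fill it in manually - a null value
    // pointer, up to s_maxInternalRopeLength fibers (unused slots zeroed), flags that
    // keep Is8Bit only if every fiber is 8-bit, and a length that is the sum of the
    // fiber lengths, with an overflow check. Note that allocatorGPR is reused as the
    // running length accumulator once the cell has been allocated.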
2933     JITCompiler::JumpList slowPath;
2934     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2935     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2936     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2937         
2938     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2939     for (unsigned i = 0; i < numOpGPRs; ++i)
2940         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2941     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2942         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2943     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2944     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2945     if (!ASSERT_DISABLED) {
2946         JITCompiler::Jump ok = m_jit.branch32(
2947             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2948         m_jit.abortWithReason(DFGNegativeStringLength);
2949         ok.link(&m_jit);
2950     }
2951     for (unsigned i = 1; i < numOpGPRs; ++i) {
2952         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2953         speculationCheck(
2954             Uncountable, JSValueSource(), nullptr,
2955             m_jit.branchAdd32(
2956                 JITCompiler::Overflow,
2957                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2958     }
2959     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2960     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2961     if (!ASSERT_DISABLED) {
2962         JITCompiler::Jump ok = m_jit.branch32(
2963             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2964         m_jit.abortWithReason(DFGNegativeStringLength);
2965         ok.link(&m_jit);
2966     }
2967     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2968     
2969     switch (numOpGPRs) {
2970     case 2:
2971         addSlowPathGenerator(slowPathCall(
2972             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2973         break;
2974     case 3:
2975         addSlowPathGenerator(slowPathCall(
2976             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2977         break;
2978     default:
2979         RELEASE_ASSERT_NOT_REACHED();
2980         break;
2981     }
2982         
2983     cellResult(resultGPR, node);
2984 }
2985
2986 void SpeculativeJIT::compileArithClz32(Node* node)
2987 {
2988     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
2989     SpeculateInt32Operand value(this, node->child1());
2990     GPRTemporary result(this, Reuse, value);
2991     GPRReg valueReg = value.gpr();
2992     GPRReg resultReg = result.gpr();
2993     m_jit.countLeadingZeros32(valueReg, resultReg);
2994     int32Result(resultReg, node);
2995 }
2996
2997 void SpeculativeJIT::compileArithSub(Node* node)
2998 {
2999     switch (node->binaryUseKind()) {
3000     case Int32Use: {
3001         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3002         
3003         if (node->child2()->isInt32Constant()) {
3004             SpeculateInt32Operand op1(this, node->child1());
3005             int32_t imm2 = node->child2()->asInt32();
3006             GPRTemporary result(this);
3007
3008             if (!shouldCheckOverflow(node->arithMode())) {
3009                 m_jit.move(op1.gpr(), result.gpr());
3010                 m_jit.sub32(Imm32(imm2), result.gpr());
3011             } else {
3012                 GPRTemporary scratch(this);
3013                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3014             }
3015
3016             int32Result(result.gpr(), node);
3017             return;
3018         }
3019             
3020         if (node->child1()->isInt32Constant()) {
3021             int32_t imm1 = node->child1()->asInt32();
3022             SpeculateInt32Operand op2(this, node->child2());
3023             GPRTemporary result(this);
3024                 
3025             m_jit.move(Imm32(imm1), result.gpr());
3026             if (!shouldCheckOverflow(node->arithMode()))
3027                 m_jit.sub32(op2.gpr(), result.gpr());
3028             else
3029                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3030                 
3031             int32Result(result.gpr(), node);
3032             return;
3033         }
3034             
3035         SpeculateInt32Operand op1(this, node->child1());
3036         SpeculateInt32Operand op2(this, node->child2());
3037         GPRTemporary result(this);
3038
3039         if (!shouldCheckOverflow(node->arithMode())) {
3040             m_jit.move(op1.gpr(), result.gpr());
3041             m_jit.sub32(op2.gpr(), result.gpr());
3042         } else
3043             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
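        // Unlike the add case above, no SpeculationRecovery is needed here: the result
        // is a fresh temporary, so neither operand is clobbered if the check fails.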
3044
3045         int32Result(result.gpr(), node);
3046         return;
3047     }
3048         
3049 #if USE(JSVALUE64)
3050     case Int52RepUse: {
3051         ASSERT(shouldCheckOverflow(node->arithMode()));
3052         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3053
3054         // Will we need an overflow check? If we can prove that neither input can be a
3055         // full Int52 (both fit in 32 bits), the difference fits easily in 52 bits and no check is needed.
3056         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3057             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3058             SpeculateWhicheverInt52Operand op1(this, node->child1());
3059             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3060             GPRTemporary result(this, Reuse, op1);
3061             m_jit.move(op1.gpr(), result.gpr());
3062             m_jit.sub64(op2.gpr(), result.gpr());
3063             int52Result(result.gpr(), node, op1.format());
3064             return;
3065         }
3066         
3067         SpeculateInt52Operand op1(this, node->child1());
3068         SpeculateInt52Operand op2(this, node->child2());
3069         GPRTemporary result(this);
3070         m_jit.move(op1.gpr(), result.gpr());
3071         speculationCheck(
3072             Int52Overflow, JSValueRegs(), 0,
3073             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3074         int52Result(result.gpr(), node);
3075         return;
3076     }
3077 #endif // USE(JSVALUE64)
3078
3079     case DoubleRepUse: {
3080         SpeculateDoubleOperand op1(this, node->child1());
3081         SpeculateDoubleOperand op2(this, node->child2());
3082         FPRTemporary result(this, op1);
3083
3084         FPRReg reg1 = op1.fpr();
3085         FPRReg reg2 = op2.fpr();
3086         m_jit.subDouble(reg1, reg2, result.fpr());
3087
3088         doubleResult(result.fpr(), node);
3089         return;
3090     }
3091         
3092     default:
3093         RELEASE_ASSERT_NOT_REACHED();
3094         return;
3095     }
3096 }
3097
3098 void SpeculativeJIT::compileArithNegate(Node* node)
3099 {
3100     switch (node->child1().useKind()) {
3101     case Int32Use: {
3102         SpeculateInt32Operand op1(this, node->child1());
3103         GPRTemporary result(this);
3104
3105         m_jit.move(op1.gpr(), result.gpr());
3106
3107         // Note: there is no arith mode that skips the overflow check but still cares
3108         // about negative zero: if the result is not used as a number, -0 cannot matter.
3109         
3110         if (!shouldCheckOverflow(node->arithMode()))
3111             m_jit.neg32(result.gpr());
3112         else if (!shouldCheckNegativeZero(node->arithMode()))
3113             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3114         else {
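            // value & 0x7fffffff is zero only for 0 and INT32_MIN: negating 0 yields -0
            // (not representable as an int32) and negating INT32_MIN overflows, so one
            // mask test covers both the negative-zero and the overflow case.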
3115             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3116             m_jit.neg32(result.gpr());
3117         }
3118
3119         int32Result(result.gpr(), node);
3120         return;
3121     }
3122
3123 #if USE(JSVALUE64)
3124     case Int52RepUse: {
3125         ASSERT(shouldCheckOverflow(node->arithMode()));
3126         
3127         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3128             SpeculateWhicheverInt52Operand op1(this, node->child1());
3129             GPRTemporary result(this);
3130             GPRReg op1GPR = op1.gpr();
3131             GPRReg resultGPR = result.gpr();
3132             m_jit.move(op1GPR, resultGPR);
3133             m_jit.neg64(resultGPR);
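            // If the result is zero then the input was zero, and the true result of
            // negating zero is -0, which Int52 cannot represent - hence the exit below.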
3134             if (shouldCheckNegativeZero(node->arithMode())) {
3135                 speculationCheck(
3136                     NegativeZero, JSValueRegs(), 0,
3137                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3138             }
3139             int52Result(resultGPR, node, op1.format());
3140             return;
3141         }
3142         
3143         SpeculateInt52Operand op1(this, node->child1());
3144         GPRTemporary result(this);
3145         GPRReg op1GPR = op1.gpr();
3146         GPRReg resultGPR = result.gpr();
3147         m_jit.move(op1GPR, resultGPR);
3148         speculationCheck(
3149             Int52Overflow, JSValueRegs(), 0,
3150             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3151         if (shouldCheckNegativeZero(node->arithMode())) {
3152             speculationCheck(
3153                 NegativeZero, JSValueRegs(), 0,
3154                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3155         }
3156         int52Result(resultGPR, node);
3157         return;
3158     }
3159 #endif // USE(JSVALUE64)
3160         
3161     case DoubleRepUse: {
3162         SpeculateDoubleOperand op1(this, node->child1());
3163         FPRTemporary result(this);
3164         
3165         m_jit.negateDouble(op1.fpr(), result.fpr());
3166         
3167         doubleResult(result.fpr(), node);
3168         return;
3169     }
3170         
3171     default:
3172         RELEASE_ASSERT_NOT_REACHED();
3173         return;
3174     }
3175 }
3176 void SpeculativeJIT::compileArithMul(Node* node)
3177 {
3178     switch (node->binaryUseKind()) {
3179     case Int32Use: {
3180         SpeculateInt32Operand op1(this, node->child1());
3181         SpeculateInt32Operand op2(this, node->child2());
3182         GPRTemporary result(this);
3183
3184         GPRReg reg1 = op1.gpr();
3185         GPRReg reg2 = op2.gpr();
3186
3187         // We can perform truncated multiplications if we get to this point, because if the
3188         // fixup phase could not prove that it would be safe, it would have turned us into
3189         // a double multiplication.
3190         if (!shouldCheckOverflow(node->arithMode())) {
3191             m_jit.move(reg1, result.gpr());
3192             m_jit.mul32(reg2, result.gpr());
3193         } else {
3194             speculationCheck(
3195                 Overflow, JSValueRegs(), 0,
3196                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3197         }
3198             
3199         // Check for negative zero, if the users of this node care about such things.
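        // If the integer product is zero but either operand is negative, the exact
        // result is -0 (e.g. -1 * 0), which int32 cannot represent, so we must exit.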
3200         if (shouldCheckNegativeZero(node->arithMode())) {
3201             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3202             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3203             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3204             resultNonZero.link(&m_jit);
3205         }
3206
3207         int32Result(result.gpr(), node);
3208         return;
3209     }
3210     
3211 #if USE(JSVALUE64)   
3212     case Int52RepUse: {
3213         ASSERT(shouldCheckOverflow(node->arithMode()));
3214         
3215         // This is super clever. We want to do an int52 multiplication and check the
3216         // int52 overflow bit. There is no direct hardware support for this, but we do
3217         // have the ability to do an int64 multiplication and check the int64 overflow
3218         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3219         // registers, with the high 12 bits being sign-extended. We can do:
3220         //
3221         //     (a * (b << 12))
3222         //
3223         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3224         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3225         // multiplication overflows is identical to whether the 'a * b' 52-bit
3226         // multiplication overflows.
3227         //
3228         // In our nomenclature, this is:
3229         //
3230         //     strictInt52(a) * int52(b) => int52
3231         //
3232         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3233         // bits.
3234         //
3235         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3236         // we just do whatever is more convenient for op1 and have op2 do the
3237         // opposite. This ensures that we do at most one shift.
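        //
        // Why this works: (a * (b << 12)) == ((a * b) << 12), and a value shifted left
        // by 12 fits in a signed 64-bit register exactly when the unshifted value fits
        // in 52 bits, so the int64 overflow flag of this multiply is precisely the
        // int52 overflow flag of a * b.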
3238
3239         SpeculateWhicheverInt52Operand op1(this, node->child1());
3240         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3241         GPRTemporary result(this);
3242         
3243         GPRReg op1GPR = op1.gpr();
3244         GPRReg op2GPR = op2.gpr();
3245         GPRReg resultGPR = result.gpr();
3246         
3247         m_jit.move(op1GPR, resultGPR);
3248         speculationCheck(
3249             Int52Overflow, JSValueRegs(), 0,
3250             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3251         
3252         if (shouldCheckNegativeZero(node->arithMode())) {
3253             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3254                 MacroAssembler::NonZero, resultGPR);
3255             speculationCheck(
3256                 NegativeZero, JSValueRegs(), 0,
3257                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3258             speculationCheck(
3259                 NegativeZero, JSValueRegs(), 0,
3260                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3261             resultNonZero.link(&m_jit);
3262         }
3263         
3264         int52Result(resultGPR, node);
3265         return;
3266     }
3267 #endif // USE(JSVALUE64)
3268         
3269     case DoubleRepUse: {
3270         SpeculateDoubleOperand op1(this, node->child1());
3271         SpeculateDoubleOperand op2(this, node->child2());
3272         FPRTemporary result(this, op1, op2);
3273         
3274         FPRReg reg1 = op1.fpr();
3275         FPRReg reg2 = op2.fpr();
3276         
3277         m_jit.mulDouble(reg1, reg2, result.fpr());
3278         
3279         doubleResult(result.fpr(), node);
3280         return;
3281     }
3282         
3283     default:
3284         RELEASE_ASSERT_NOT_REACHED();
3285         return;
3286     }
3287 }
3288
3289 void SpeculativeJIT::compileArithDiv(Node* node)
3290 {
3291     switch (node->binaryUseKind()) {
3292     case Int32Use: {
3293 #if CPU(X86) || CPU(X86_64)
3294         SpeculateInt32Operand op1(this, node->child1());
3295         SpeculateInt32Operand op2(this, node->child2());
3296         GPRTemporary eax(this, X86Registers::eax);
3297         GPRTemporary edx(this, X86Registers::edx);
3298         GPRReg op1GPR = op1.gpr();
3299         GPRReg op2GPR = op2.gpr();
3300     
3301         GPRReg op2TempGPR;
3302         GPRReg temp;
3303         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3304             op2TempGPR = allocate();
3305             temp = op2TempGPR;
3306         } else {
3307             op2TempGPR = InvalidGPRReg;
3308             if (op1GPR == X86Registers::eax)
3309                 temp = X86Registers::edx;
3310             else
3311                 temp = X86Registers::eax;
3312         }
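        // x86 idiv is register-constrained: it divides the 64-bit dividend in edx:eax
        // by its operand, leaving the quotient in eax and the remainder in edx. The
        // divisor therefore must not live in eax or edx (it gets a freshly allocated
        // home, op2TempGPR, if it does), and "temp" is chosen so that it never aliases
        // the dividend in op1GPR.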