[JSC] Add support for GetByVal on arrays of Undecided shape
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSCInlines.h"
42 #include "JSEnvironmentRecord.h"
43 #include "JSLexicalEnvironment.h"
44 #include "LinkBuffer.h"
45 #include "ScopedArguments.h"
46 #include "ScratchRegisterAllocator.h"
47 #include "WriteBarrierBuffer.h"
48 #include <wtf/MathExtras.h>
49
50 namespace JSC { namespace DFG {
51
// Constructs the speculative code generator over the JITCompiler's graph.
// Value-tracking state (m_generationInfo) is sized to the frame's register
// count; the abstract state and in-flight interpreter are bound to the same
// graph; the variable event stream and minified graph come from the JIT code
// object being built.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}
66
// Defined out-of-line; nothing to release explicitly — members clean up
// themselves.
SpeculativeJIT::~SpeculativeJIT()
{
}
70
// Inline-allocates a JSArray with the given structure and initial element
// count: the cell ends up in resultGPR and the butterfly in storageGPR.
// Any inline-allocation failure falls back to operationNewArrayWithSize via
// a custom slow path generator.
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    // Never allocate a vector smaller than the base length, even for tiny arrays.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    // Allocate the butterfly (element vector plus indexing header) first,
    // then the cell itself.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    // Rewind by the vector payload so storageGPR points at the butterfly
    // (NOTE(review): assumes emitAllocateBasicStorage leaves the end of the
    // allocation in storageGPR — confirm against its definition).
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
        // Double arrays use PNaN as the hole value: fill the unused tail of
        // the vector with it so it reads as "empty".
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        // 32-bit: store the encoded double as separate tag/payload words.
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}
114
// Materializes the argument count for the given (possibly inlined) call
// frame into lengthGPR. For a non-varargs inlined frame the count is a
// compile-time constant; otherwise it is loaded from the frame's
// ArgumentCount slot. When includeThis is false, |this| is excluded from
// the count.
void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        // Pick the right ArgumentCount slot: the machine frame's, or the
        // inlined frame's dedicated register.
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}
130
// Convenience overload: extracts the inline call frame from the code origin.
void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}
135
// Loads the callee cell for the given code origin into calleeGPR. An
// inlined closure call must recover the callee from its stack slot; a
// non-closure inlined call knows the callee constant at compile time; the
// machine frame reads the Callee slot.
void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}
151
// Computes the address of the first argument of the frame identified by
// origin (call-frame-relative offset scaled by register size) into startGPR.
void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}
159
// When OSR exit fuzzing is enabled, emits code that counts every executed
// speculation check and returns a jump taken once the configured check
// number is hit (forcing the exit). Returns an unset jump when fuzzing is
// off or no fire point is configured.
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();
    
    MacroAssembler::Jump result;
    
    // Borrow regT0 for the counter update; it is saved/restored around the
    // check on every path.
    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            // Fire on every check from `atOrAfter` onwards (skip while Below).
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            // Fire on exactly the `at`-th check (skip while NotEqual).
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);
    
    return result;
}
193
// Registers an OSR exit taken when jumpToFail fires. If exit fuzzing is
// active, the fuzz-induced jump is folded into the same exit record.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
209
// JumpList variant of the above: all jumps in jumpsToFail lead to the same
// OSR exit, with the fuzz jump (if any) merged in.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
225
// Registers an OSR exit whose failing jumps will be attached later through
// the returned placeholder. The exit's index must be captured before the
// exit is appended.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}
236
// Edge overload: forwards to the Node* placeholder version.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}
242
// Edge overload: forwards to the Node* single-jump version.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}
248
// Edge overload: forwards to the Node* jump-list version.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}
254
// Like the plain speculationCheck(), but additionally records a
// SpeculationRecovery to be applied when the exit is taken. Note this
// variant does not run the OSR exit fuzz check.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}
264
// Edge overload: forwards to the Node* version with recovery.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}
270
// Emits an invalidation point: no inline branch is generated (the jump list
// is empty), but an UncountableInvalidation OSR exit is recorded together
// with a watchpoint label (m_replacementSource) marking where replacement
// code goes when this code block is invalidated.
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
285
// Emits an unconditional OSR exit and marks the rest of this compilation
// path as dead (m_compileOkay = false) so subsequent checks become no-ops.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}
296
// Edge overload: forwards to the Node* version.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}
302
// Records a BadType OSR exit for jumpToFail and narrows the abstract
// interpreter's view of the edge to typesPassedThrough, since past this
// point the value is proven to be of that type.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}
309
310 RegisterSet SpeculativeJIT::usedRegisters()
311 {
312     RegisterSet result;
313     
314     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
315         GPRReg gpr = GPRInfo::toRegister(i);
316         if (m_gprs.isInUse(gpr))
317             result.set(gpr);
318     }
319     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
320         FPRReg fpr = FPRInfo::toRegister(i);
321         if (m_fprs.isInUse(fpr))
322             result.set(fpr);
323     }
324     
325     result.merge(RegisterSet::specialRegisters());
326     
327     return result;
328 }
329
// Queues a slow path to be emitted after the fast-path code; see
// runSlowPathGenerators().
void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}
334
// Emits all queued slow paths. The index loop re-reads size() on each
// iteration, so generators appended while generating would also run —
// NOTE(review): presumably deliberate; do not convert to a range-for.
void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}
340
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
351
352 void SpeculativeJIT::clearGenerationInfo()
353 {
354     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
355         m_generationInfo[i] = GenerationInfo();
356     m_gprs = RegisterBank<GPRInfo>();
357     m_fprs = RegisterBank<FPRInfo>();
358 }
359
// Computes — without emitting any code — the spill/fill recipe needed to
// preserve the GPR `source` (holding the value of virtual register
// `spillMe`) across a call. The returned plan is executed later by
// silentSpill() / silentFill(). Constants are re-materialized rather than
// spilled; the fill action depends on both the register format and the
// existing spill format.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    // Decide how to save the register; nothing to do if the value is
    // already spilled (or doesn't need to be).
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        // On 32-bit a JS value lives in a tag/payload register pair; spill
        // whichever half `source` is.
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    // Decide how to restore the register after the call.
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // Unboxed booleans are not kept in GPRs on 64-bit.
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        // Shifted Int52: if the spill slot holds the strict (unshifted)
        // form, it must be shifted left on reload.
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        // Strict Int52: conversely, a shifted spill must be shifted right.
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            // The slot holds a raw int32; it must be re-boxed on reload.
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            // If the spill slot stores an unboxed value, the tag is implied
            // by the spill format rather than stored in memory.
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
509     
// FPR counterpart of silentSavePlanForGPR: computes the spill/fill recipe
// for a double value held in `source`. Constants are re-materialized;
// everything else round-trips through the value's stack slot.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
547     
// Emits the store (if any) described by `plan`, writing the register's
// contents to the owning node's stack slot.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
574     
// Re-materializes the value described by `plan` back into its register,
// either by setting a constant or reloading from the node's stack slot.
// `canTrample` is a scratch GPR, used only by the 64-bit SetDoubleConstant
// path to stage the bit pattern before moving it into the FPR.
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        // Int52 register form is the machine int shifted left by the Int52
        // shift amount.
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Stage the raw bits in the scratch GPR, then move into the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Reload a raw int32 and re-box it as a JSValue.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as Int52 (shifted); register wants strict form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as strict Int52; register wants shifted form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
673     
// tempGPR must already hold the cell's indexing-type byte. Returns a jump
// taken when the indexing type does not match the requested shape under the
// mode's array-class requirement. Clobbers tempGPR.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // Original-array modes are not checked via indexing type here —
        // reaching this case is a bug (presumably they are handled by
        // structure checks elsewhere; TODO confirm).
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }
        
    case Array::Array:
        // Must have the given shape AND the IsArray bit set.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        // Must have the given shape with the IsArray bit clear.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        // Only the shape matters; the IsArray bit is masked out.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}
704
// JumpList variant: with the indexing-type byte in tempGPR, returns all the
// jumps taken when the type does not satisfy arrayMode. Simple shapes
// (Int32/Double/Contiguous/Undecided) delegate to the single-shape helper;
// ArrayStorage modes need a range check to accept SlowPut. Clobbers tempGPR.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Fail if the IsArray bit is clear...
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                // ...or if the shape is outside [ArrayStorageShape,
                // SlowPutArrayStorageShape] (range check via subtract +
                // unsigned compare).
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Plain ArrayStorage: must be exactly IsArray | ArrayStorageShape.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Non-array (or possibly-array) cases ignore the IsArray bit.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            // Same range check as above, without the IsArray requirement.
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}
764
765 void SpeculativeJIT::checkArray(Node* node)
766 {
767     ASSERT(node->arrayMode().isSpecific());
768     ASSERT(!node->arrayMode().doesConversion());
769     
770     SpeculateCellOperand base(this, node->child1());
771     GPRReg baseReg = base.gpr();
772     
773     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
774         noResult(m_currentNode);
775         return;
776     }
777     
778     const ClassInfo* expectedClassInfo = 0;
779     
780     switch (node->arrayMode().type()) {
781     case Array::String:
782         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
783         break;
784     case Array::Int32:
785     case Array::Double:
786     case Array::Contiguous:
787     case Array::Undecided:
788     case Array::ArrayStorage:
789     case Array::SlowPutArrayStorage: {
790         GPRTemporary temp(this);
791         GPRReg tempGPR = temp.gpr();
792         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
793         speculationCheck(
794             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
795             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
796         
797         noResult(m_currentNode);
798         return;
799     }
800     case Array::DirectArguments:
801         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
802         noResult(m_currentNode);
803         return;
804     case Array::ScopedArguments:
805         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
806         noResult(m_currentNode);
807         return;
808     default:
809         speculateCellTypeWithoutTypeFiltering(
810             node->child1(), baseReg,
811             typeForTypedArrayType(node->arrayMode().typedArrayType()));
812         noResult(m_currentNode);
813         return;
814     }
815     
816     RELEASE_ASSERT(expectedClassInfo);
817     
818     GPRTemporary temp(this);
819     GPRTemporary temp2(this);
820     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
821     speculationCheck(
822         BadType, JSValueSource::unboxedCell(baseReg), node,
823         m_jit.branchPtr(
824             MacroAssembler::NotEqual,
825             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
826             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
827     
828     noResult(m_currentNode);
829 }
830
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    // Converts the base object's storage to the shape demanded by the node's
    // ArrayMode, via an out-of-line slow path. The fast path falls through when
    // the object already matches (by structure for ArrayifyToStructure, by
    // indexing type otherwise). propertyReg may be InvalidGPRReg when there is
    // no index operand.
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    // ArrayifyToStructure compares against one known structure and needs no
    // scratch register for it; the other arrayify ops do.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    // The slow path performs the actual storage conversion out of line.
    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
    
    noResult(m_currentNode);
}
866
867 void SpeculativeJIT::arrayify(Node* node)
868 {
869     ASSERT(node->arrayMode().isSpecific());
870     
871     SpeculateCellOperand base(this, node->child1());
872     
873     if (!node->child2()) {
874         arrayify(node, base.gpr(), InvalidGPRReg);
875         return;
876     }
877     
878     SpeculateInt32Operand property(this, node->child2());
879     
880     arrayify(node, base.gpr(), property.gpr());
881 }
882
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    // Materializes the storage pointer for the given edge into a GPR, loading
    // it from its spill slot if needed. When the edge is not in (or spilled
    // as) storage format, it must be a cell and is filled as one.
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            // Spilled as storage: reload from the stack into a fresh GPR.
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }
        
        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }
        
    case DataFormatStorage: {
        // Already live in a register; lock it so it is not reallocated.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    default:
        return fillSpeculateCell(edge);
    }
}
912
913 void SpeculativeJIT::useChildren(Node* node)
914 {
915     if (node->flags() & NodeHasVarArgs) {
916         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
917             if (!!m_jit.graph().m_varArgChildren[childIdx])
918                 use(m_jit.graph().m_varArgChildren[childIdx]);
919         }
920     } else {
921         Edge child1 = node->child1();
922         if (!child1) {
923             ASSERT(!node->child2() && !node->child3());
924             return;
925         }
926         use(child1);
927         
928         Edge child2 = node->child2();
929         if (!child2) {
930             ASSERT(!node->child3());
931             return;
932         }
933         use(child2);
934         
935         Edge child3 = node->child3();
936         if (!child3)
937             return;
938         use(child3);
939     }
940 }
941
void SpeculativeJIT::compileIn(Node* node)
{
    // Compiles the 'in' operator. When the property is a constant atomic
    // string we emit a patchable inline cache (operationInOptimize); otherwise
    // we fall back to a generic runtime call.
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();
    
    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
            
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());
            
            // The patchable jump initially targets the slow path; once the IC
            // is compiled it gets repatched, with 'done' marking the join point.
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();
            
            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
            
            // Record the register assignments the IC patching machinery needs.
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    // Generic case: flush and call out to the runtime with base and key.
    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();
        
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
        
    base.use();
    key.use();
        
    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
997
998 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
999 {
1000     unsigned branchIndexInBlock = detectPeepHoleBranch();
1001     if (branchIndexInBlock != UINT_MAX) {
1002         Node* branchNode = m_block->at(branchIndexInBlock);
1003
1004         ASSERT(node->adjustedRefCount() == 1);
1005         
1006         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1007     
1008         m_indexInBlock = branchIndexInBlock;
1009         m_currentNode = branchNode;
1010         
1011         return true;
1012     }
1013     
1014     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1015     
1016     return false;
1017 }
1018
1019 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1020 {
1021     unsigned branchIndexInBlock = detectPeepHoleBranch();
1022     if (branchIndexInBlock != UINT_MAX) {
1023         Node* branchNode = m_block->at(branchIndexInBlock);
1024
1025         ASSERT(node->adjustedRefCount() == 1);
1026         
1027         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1028     
1029         m_indexInBlock = branchIndexInBlock;
1030         m_currentNode = branchNode;
1031         
1032         return true;
1033     }
1034     
1035     nonSpeculativeNonPeepholeStrictEq(node, invert);
1036     
1037     return false;
1038 }
1039
1040 static const char* dataFormatString(DataFormat format)
1041 {
1042     // These values correspond to the DataFormat enum.
1043     const char* strings[] = {
1044         "[  ]",
1045         "[ i]",
1046         "[ d]",
1047         "[ c]",
1048         "Err!",
1049         "Err!",
1050         "Err!",
1051         "Err!",
1052         "[J ]",
1053         "[Ji]",
1054         "[Jd]",
1055         "[Jc]",
1056         "Err!",
1057         "Err!",
1058         "Err!",
1059         "Err!",
1060     };
1061     return strings[format];
1062 }
1063
void SpeculativeJIT::dump(const char* label)
{
    // Debug dump of both register banks and every virtual register's
    // generation info, optionally wrapped in <label> ... </label> markers.
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Print register format and spill format side by side for live entries.
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1095
1096 GPRTemporary::GPRTemporary()
1097     : m_jit(0)
1098     , m_gpr(InvalidGPRReg)
1099 {
1100 }
1101
1102 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1103     : m_jit(jit)
1104     , m_gpr(InvalidGPRReg)
1105 {
1106     m_gpr = m_jit->allocate();
1107 }
1108
1109 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1110     : m_jit(jit)
1111     , m_gpr(InvalidGPRReg)
1112 {
1113     m_gpr = m_jit->allocate(specific);
1114 }
1115
1116 #if USE(JSVALUE32_64)
1117 GPRTemporary::GPRTemporary(
1118     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1119     : m_jit(jit)
1120     , m_gpr(InvalidGPRReg)
1121 {
1122     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1123         m_gpr = m_jit->reuse(op1.gpr(which));
1124     else
1125         m_gpr = m_jit->allocate();
1126 }
1127 #endif // USE(JSVALUE32_64)
1128
// Default-constructs with no registers allocated (members default-construct empty).
JSValueRegsTemporary::JSValueRegsTemporary() { }
1130
// Allocates the register(s) needed to hold a JSValue: a single GPR on 64-bit,
// a payload/tag pair on 32-bit.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1140
// The GPRTemporary members release their registers on destruction.
JSValueRegsTemporary::~JSValueRegsTemporary() { }
1142
// Packages the allocated register(s) as a JSValueRegs.
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1151
1152 void GPRTemporary::adopt(GPRTemporary& other)
1153 {
1154     ASSERT(!m_jit);
1155     ASSERT(m_gpr == InvalidGPRReg);
1156     ASSERT(other.m_jit);
1157     ASSERT(other.m_gpr != InvalidGPRReg);
1158     m_jit = other.m_jit;
1159     m_gpr = other.m_gpr;
1160     other.m_jit = 0;
1161     other.m_gpr = InvalidGPRReg;
1162 }
1163
1164 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1165     : m_jit(jit)
1166     , m_fpr(InvalidFPRReg)
1167 {
1168     m_fpr = m_jit->fprAllocate();
1169 }
1170
1171 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1172     : m_jit(jit)
1173     , m_fpr(InvalidFPRReg)
1174 {
1175     if (m_jit->canReuse(op1.node()))
1176         m_fpr = m_jit->reuse(op1.fpr());
1177     else
1178         m_fpr = m_jit->fprAllocate();
1179 }
1180
1181 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1182     : m_jit(jit)
1183     , m_fpr(InvalidFPRReg)
1184 {
1185     if (m_jit->canReuse(op1.node()))
1186         m_fpr = m_jit->reuse(op1.fpr());
1187     else if (m_jit->canReuse(op2.node()))
1188         m_fpr = m_jit->reuse(op2.fpr());
1189     else
1190         m_fpr = m_jit->fprAllocate();
1191 }
1192
1193 #if USE(JSVALUE32_64)
1194 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1195     : m_jit(jit)
1196     , m_fpr(InvalidFPRReg)
1197 {
1198     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1199         m_fpr = m_jit->reuse(op1.fpr());
1200     else
1201         m_fpr = m_jit->fprAllocate();
1202 }
1203 #endif
1204
1205 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1206 {
1207     BasicBlock* taken = branchNode->branchData()->taken.block;
1208     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1209     
1210     SpeculateDoubleOperand op1(this, node->child1());
1211     SpeculateDoubleOperand op2(this, node->child2());
1212     
1213     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1214     jump(notTaken);
1215 }
1216
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    // Fused object-identity compare-and-branch. Both children are speculated
    // to be objects, after which equality is plain pointer comparison.
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
    
    // If the taken block is next, invert the test so the taken case falls through.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Watchpoint intact: just check both operands are objects. The check
        // is elided when the abstract state already proves object-ness.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        // Watchpoint invalidated: additionally exit if either operand has the
        // MasqueradesAsUndefined flag set, since identity comparison would
        // then be the wrong semantics.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1273
1274 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1275 {
1276     BasicBlock* taken = branchNode->branchData()->taken.block;
1277     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1278
1279     // The branch instruction will branch to the taken block.
1280     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1281     if (taken == nextBlock()) {
1282         condition = JITCompiler::invert(condition);
1283         BasicBlock* tmp = taken;
1284         taken = notTaken;
1285         notTaken = tmp;
1286     }
1287
1288     if (node->child1()->isBooleanConstant()) {
1289         bool imm = node->child1()->asBoolean();
1290         SpeculateBooleanOperand op2(this, node->child2());
1291         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1292     } else if (node->child2()->isBooleanConstant()) {
1293         SpeculateBooleanOperand op1(this, node->child1());
1294         bool imm = node->child2()->asBoolean();
1295         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1296     } else {
1297         SpeculateBooleanOperand op1(this, node->child1());
1298         SpeculateBooleanOperand op2(this, node->child2());
1299         branch32(condition, op1.gpr(), op2.gpr(), taken);
1300     }
1301
1302     jump(notTaken);
1303 }
1304
1305 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1306 {
1307     BasicBlock* taken = branchNode->branchData()->taken.block;
1308     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1309
1310     // The branch instruction will branch to the taken block.
1311     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1312     if (taken == nextBlock()) {
1313         condition = JITCompiler::invert(condition);
1314         BasicBlock* tmp = taken;
1315         taken = notTaken;
1316         notTaken = tmp;
1317     }
1318
1319     if (node->child1()->isInt32Constant()) {
1320         int32_t imm = node->child1()->asInt32();
1321         SpeculateInt32Operand op2(this, node->child2());
1322         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1323     } else if (node->child2()->isInt32Constant()) {
1324         SpeculateInt32Operand op1(this, node->child1());
1325         int32_t imm = node->child2()->asInt32();
1326         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1327     } else {
1328         SpeculateInt32Operand op1(this, node->child1());
1329         SpeculateInt32Operand op2(this, node->child2());
1330         branch32(condition, op1.gpr(), op2.gpr(), taken);
1331     }
1332
1333     jump(notTaken);
1334 }
1335
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare. 
        ASSERT(node->adjustedRefCount() == 1);

        // Dispatch on the use kinds of the compare's children to the most
        // specialized fused branch available.
        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Generic fused branch; it consumes the children itself.
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The speculative fused paths above do not consume the children, so do
        // it here, then skip ahead past the branch node we just generated.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1386
1387 void SpeculativeJIT::noticeOSRBirth(Node* node)
1388 {
1389     if (!node->hasVirtualRegister())
1390         return;
1391     
1392     VirtualRegister virtualRegister = node->virtualRegister();
1393     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1394     
1395     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1396 }
1397
1398 void SpeculativeJIT::compileMovHint(Node* node)
1399 {
1400     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1401     
1402     Node* child = node->child1().node();
1403     noticeOSRBirth(child);
1404     
1405     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1406 }
1407
void SpeculativeJIT::bail(AbortReason reason)
{
    // Gives up on code generation at the current point: plants a crashing
    // abort in the generated code and discards register-allocation state.
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    // NOTE(review): m_compileOkay is deliberately reset to true, presumably so
    // that subsequent blocks can still be processed past the planted abort —
    // confirm against the checks in compileCurrentBlock().
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
1416
void SpeculativeJIT::compileCurrentBlock()
{
    // Generates code for m_block: emits the block head label, replays the
    // head-of-block variable state into the event stream, then code-generates
    // each node in order while running the abstract interpreter alongside.
    ASSERT(m_compileOkay);
    
    if (!m_block)
        return;
    
    ASSERT(m_block->isReachable);
    
    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());
    
    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);
    
    // Record the flush format of every live variable at the head of the block
    // so OSR exit can reconstruct them.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.
        
        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }
    
    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();
    
    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);
        
        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        if (ASSERT_DISABLED)
            m_canExit = true; // Essentially disable the assertions.
        else
            m_canExit = mayExit(m_jit.graph(), m_currentNode);
        
        m_interpreter.startExecuting();
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();
        
        ASSERT(m_currentNode->shouldGenerate());
        
        if (verboseCompilationEnabled()) {
            dataLogF(
                "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                (int)m_currentNode->index(),
                m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
            dataLog("\n");
        }
        
        compile(m_currentNode);
        
        if (belongsInMinifiedGraph(m_currentNode->op()))
            m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
        
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.clearRegisterAllocationOffsets();
#endif
        
        if (!m_compileOkay) {
            bail(DFGBailedAtEndOfNode);
            return;
        }
        
        // Make sure that the abstract state is rematerialized for the next node.
        m_interpreter.executeEffects(m_indexInBlock);
    }
    
    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}
1523
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
void SpeculativeJIT::checkArgumentTypes()
{
    // Emits, before any block code, a speculation check per live argument
    // whose flush format is narrower than a generic JSValue.
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }
        
        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();
        
        // A fully generic argument needs no entry check.
        if (format == FlushedJSValue)
            continue;
        
        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
        
#if USE(JSVALUE64)
        // 64-bit: checks are performed on the boxed value in the stack slot.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // 32-bit: checks compare the tag word of the stack slot.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1596
1597 bool SpeculativeJIT::compile()
1598 {
1599     checkArgumentTypes();
1600     
1601     ASSERT(!m_currentNode);
1602     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1603         m_jit.setForBlockIndex(blockIndex);
1604         m_block = m_jit.graph().block(blockIndex);
1605         compileCurrentBlock();
1606     }
1607     linkBranches();
1608     return true;
1609 }
1610
1611 void SpeculativeJIT::createOSREntries()
1612 {
1613     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1614         BasicBlock* block = m_jit.graph().block(blockIndex);
1615         if (!block)
1616             continue;
1617         if (!block->isOSRTarget)
1618             continue;
1619         
1620         // Currently we don't have OSR entry trampolines. We could add them
1621         // here if need be.
1622         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1623     }
1624 }
1625
1626 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1627 {
1628     unsigned osrEntryIndex = 0;
1629     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1630         BasicBlock* block = m_jit.graph().block(blockIndex);
1631         if (!block)
1632             continue;
1633         if (!block->isOSRTarget)
1634             continue;
1635         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1636     }
1637     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1638     
1639     if (verboseCompilationEnabled()) {
1640         DumpContext dumpContext;
1641         dataLog("OSR Entries:\n");
1642         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1643             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1644         if (!dumpContext.isEmpty())
1645             dumpContext.dump(WTF::dataFile());
1646     }
1647 }
1648
1649 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1650 {
1651     Edge child3 = m_jit.graph().varArgChild(node, 2);
1652     Edge child4 = m_jit.graph().varArgChild(node, 3);
1653
1654     ArrayMode arrayMode = node->arrayMode();
1655     
1656     GPRReg baseReg = base.gpr();
1657     GPRReg propertyReg = property.gpr();
1658     
1659     SpeculateDoubleOperand value(this, child3);
1660
1661     FPRReg valueReg = value.fpr();
1662     
1663     DFG_TYPE_CHECK(
1664         JSValueRegs(), child3, SpecFullRealNumber,
1665         m_jit.branchDouble(
1666             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1667     
1668     if (!m_compileOkay)
1669         return;
1670     
1671     StorageOperand storage(this, child4);
1672     GPRReg storageReg = storage.gpr();
1673
1674     if (node->op() == PutByValAlias) {
1675         // Store the value to the array.
1676         GPRReg propertyReg = property.gpr();
1677         FPRReg valueReg = value.fpr();
1678         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1679         
1680         noResult(m_currentNode);
1681         return;
1682     }
1683     
1684     GPRTemporary temporary;
1685     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1686
1687     MacroAssembler::Jump slowCase;
1688     
1689     if (arrayMode.isInBounds()) {
1690         speculationCheck(
1691             OutOfBounds, JSValueRegs(), 0,
1692             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1693     } else {
1694         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1695         
1696         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1697         
1698         if (!arrayMode.isOutOfBounds())
1699             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1700         
1701         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1702         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1703         
1704         inBounds.link(&m_jit);
1705     }
1706     
1707     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1708
1709     base.use();
1710     property.use();
1711     value.use();
1712     storage.use();
1713     
1714     if (arrayMode.isOutOfBounds()) {
1715         addSlowPathGenerator(
1716             slowPathCall(
1717                 slowCase, this,
1718                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1719                 NoResult, baseReg, propertyReg, valueReg));
1720     }
1721
1722     noResult(m_currentNode, UseChildrenCalledExplicitly);
1723 }
1724
// Emits code for charCodeAt on a value already speculated to be a string:
// loads the character at the given index (8- or 16-bit storage) and produces
// it as an int32. Out-of-range indices trigger an OSR exit.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();
    
    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // Load the string's impl pointer so we can test the 8-/16-bit flag below.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    // 8-bit path: one byte per character.
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    // 16-bit path: two bytes per character.
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}
1759
// Emits GetByVal on an Array::String base: produces the single-character
// string at the given index. In-bounds mode OSR-exits on a bad index;
// out-of-bounds mode falls back to slow paths (and, when the string
// prototype chain is sane, to a path that can return undefined cheaply).
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // On 32-bit we only need a tag register when the result can be a generic
    // JSValue, i.e. in the out-of-bounds configuration.
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    // Load the impl pointer to check 8- vs 16-bit character storage.
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters at or above 0x100 are not in the single-character cache and
    // go to the operationSingleCharacterString slow path below.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the VM's single-character string table (one pointer per
    // character code) and load the cached JSString*.
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        // Fast path produced a cell; materialize the matching tag.
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
            // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
            // loads return a trivial value". Something like SaneChainOutOfBounds. This should
            // speculate that we don't take negative out-of-bounds, or better yet, it should rely
            // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
            // indexed properties either.
            // https://bugs.webkit.org/show_bug.cgi?id=144668
            m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
            m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
            
#if USE(JSVALUE64)
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
#else
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg));
#endif
        } else {
            // Prototype chain may intercept indexed accesses: take the fully
            // generic slow path for out-of-bounds indices.
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }
        
#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
1862
// Emits String.fromCharCode for an int32 argument: fast path looks the
// character up in the VM's single-character string cache; codes outside the
// cached range (unsigned compare also catches negatives) or cache misses
// (null entries) call operationStringFromCharCode.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // A zero entry means the cached string hasn't been created yet.
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
1881
1882 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1883 {
1884     VirtualRegister virtualRegister = node->virtualRegister();
1885     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1886
1887     switch (info.registerFormat()) {
1888     case DataFormatStorage:
1889         RELEASE_ASSERT_NOT_REACHED();
1890
1891     case DataFormatBoolean:
1892     case DataFormatCell:
1893         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1894         return GeneratedOperandTypeUnknown;
1895
1896     case DataFormatNone:
1897     case DataFormatJSCell:
1898     case DataFormatJS:
1899     case DataFormatJSBoolean:
1900     case DataFormatJSDouble:
1901         return GeneratedOperandJSValue;
1902
1903     case DataFormatJSInt32:
1904     case DataFormatInt32:
1905         return GeneratedOperandInteger;
1906
1907     default:
1908         RELEASE_ASSERT_NOT_REACHED();
1909         return GeneratedOperandTypeUnknown;
1910     }
1911 }
1912
// Emits ValueToInt32 for the supported use kinds: Int52 (64-bit only),
// DoubleRep, and Number/NotCell JSValues. The JSValue paths dispatch on the
// operand's current register format (see checkGeneratedTypeForToInt32) and
// fall back to the C++ toInt32 operation for doubles that don't truncate
// in-line.
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 in a GPR: ToInt32 is just a zero-extension of the low bits.
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        // Try an inline truncation; doubles that fail (out of range, NaN,
        // etc.) call the toInt32 operation on the slow path.
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
        
        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
        
        int32Result(gpr, node);
        return;
    }
    
    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            // Already an int32: just move it into the result register.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // In the JSVALUE64 encoding, values at or above the tag-type-number
            // base are boxed int32s.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                // NumberUse: OSR-exit on anything that isn't a number.
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                // NotCellUse: exclude cells, then map non-number non-cells
                // (booleans, undefined, null) to 0/1.
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
                
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
                
                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());
                
                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // Call out to the C++ toInt32 helper for the double case.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();
        
            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                // 32_64 encoding: dispatch on the tag word.
                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    // Tags at or above LowestTag are non-double; OSR-exit there.
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
                    
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        m_jit.branchIfCell(op1.jsValueRegs()));
                    
                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());
                    
                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());
                    
                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                // Call out to the C++ toInt32 helper for the double case.
                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32 already terminated compilation.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
    
    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2073
// Emits UInt32ToNumber: reinterprets an int32 as an unsigned 32-bit value.
// If the arithmetic mode admits overflow, the result is always a double;
// otherwise the value is speculated non-negative and kept as an int32.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.
            
        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);
            
        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();
            
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
            
        // A negative int32 bit pattern denotes an unsigned value >= 2^31;
        // correct the signed conversion by adding 2^32.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);
            
        doubleResult(outputFPR, node);
        return;
    }
    
    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    // Negative means the unsigned value doesn't fit in int32: OSR exit.
    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    int32Result(result.gpr(), node, op1.format());
}
2107
// Emits DoubleAsInt32: converts a double to int32, OSR-exiting when the
// conversion is lossy (and, depending on the arithmetic mode, on negative
// zero).
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);
    
    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    // This node is only used in modes that check overflow.
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), 0, failureCases);

    int32Result(resultGPR, node);
}
2127
// Emits DoubleRep: converts the operand to an unboxed double. Supports
// RealNumberUse (number with no impure NaN), Number/NotCell JSValues (NotCell
// additionally maps undefined -> NaN, null/false -> 0, true -> 1), and Int52
// on 64-bit.
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case RealNumberUse: {
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);
        
        JSValueRegs op1Regs = op1.jsValueRegs();
        FPRReg resultFPR = result.fpr();
        
#if USE(JSVALUE64)
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.move(op1Regs.gpr(), tempGPR);
        m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
#else
        FPRTemporary temp(this);
        FPRReg tempFPR = temp.fpr();
        unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif
        
        // Self-compare: equal means the unboxed double is a real (non-NaN)
        // number, so we're done. NaN here means it wasn't a double at all.
        JITCompiler::Jump done = m_jit.branchDouble(
            JITCompiler::DoubleEqual, resultFPR, resultFPR);
        
        // Not a double: it must be an int32, else OSR-exit.
        DFG_TYPE_CHECK(
            op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
        m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
        
        done.link(&m_jit);
        
        doubleResult(resultFPR, node);
        return;
    }
    
    case NotCellUse:
    case NumberUse: {
        ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.

        // If abstract interpretation proved the value is an int32, a plain
        // conversion suffices.
        SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
        if (isInt32Speculation(possibleTypes)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }

        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        // Values at or above the tag-type-number base are boxed int32s.
        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);

        if (node->child1().useKind() == NotCellUse) {
            // NotCellUse: handle undefined, null and booleans inline.
            JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
            JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            // null converts to 0.
            JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
            done.append(isNull);

            // Anything left must be a boolean, else OSR-exit.
            DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
                m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));

            // false -> 0 (already loaded), true -> 1.
            JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            // undefined -> NaN.
            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }
    
        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        done.append(m_jit.jump());
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);
    
        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;
    
        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));

        if (node->child1().useKind() == NotCellUse) {
            // NotCellUse: handle undefined, null and booleans inline.
            JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
            JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            // null converts to 0.
            JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
            done.append(isNull);

            // Anything left must be a boolean, else OSR-exit.
            DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));

            // false -> 0 (already loaded), true -> 1.
            JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            // undefined -> NaN.
            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }

        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        done.append(m_jit.jump());
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)
    
        doubleResult(resultFPR, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 fits losslessly in a double: plain int64 -> double conversion.
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);
        
        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2299
// Emits ValueRep: boxes an unboxed representation (double or, on 64-bit,
// Int52) back into a JSValue.
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);
        
        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();
        
        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);
        
        jsValueResult(resultRegs, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Box the strict Int52 value into the JSValue number encoding.
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();
        
        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
        
        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2343
// Biases the value by +0.5 (so that subsequent truncation rounds half-up)
// and clamps it to the byte range. The negated comparison (!(x > 0)) also
// maps NaN to zero.
static double clampDoubleToByte(double d)
{
    const double biased = d + 0.5;
    if (!(biased > 0))
        return 0;
    if (biased > 255)
        return 255;
    return biased;
}
2353
// Emits code that clamps the int32 in `result` to [0, 255] in place.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    // Unsigned BelowOrEqual: non-negative values 0..255 are already in range.
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    // Signed GreaterThan separates too-large positives from negatives.
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    // Negative: clamp to 0.
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    // Above 255: clamp to 255.
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}
2365
// Emit code that clamps the double in |source| to a byte in [0, 255], leaving
// the integer result in |result|. This is the JIT analogue of
// clampDoubleToByte() above: bias by 0.5 then truncate; NaN and non-positive
// values produce 0, values above 255 produce 255. |scratch| is clobbered.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
    
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);   
    MacroAssembler::Jump truncatedInt = jit.jump();
    
    tooSmall.link(&jit);
    jit.xorPtr(result, result);
    MacroAssembler::Jump zeroed = jit.jump();
    
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    
    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
2395
// Return a jump that is taken when |indexGPR| is out of bounds for the typed
// array in |baseGPR|. Returns an unset Jump when no dynamic check is needed:
// PutByValAlias means the bounds were already checked, and a statically
// foldable view with a constant in-range index needs no check at all.
JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
{
    if (node->op() == PutByValAlias)
        return JITCompiler::Jump();
    // If abstract interpretation proved the base to be a particular
    // JSArrayBufferView, compare against its constant length rather than
    // reloading the length from memory.
    JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
        m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
    if (view) {
        uint32_t length = view->length();
        Node* indexNode = m_jit.graph().child(node, 1).node();
        if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
            return JITCompiler::Jump();
        return m_jit.branch32(
            MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
    }
    // General case: unsigned compare against the view's length field. The
    // unsigned AboveOrEqual also catches negative indices.
    return m_jit.branch32(
        MacroAssembler::AboveOrEqual, indexGPR,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
}
2414
2415 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2416 {
2417     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2418     if (!jump.isSet())
2419         return;
2420     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2421 }
2422
// Compile GetByVal on an integer typed array (child1 = base, child2 = index,
// child3 = storage). The result format depends on the element type and on
// what the node speculates: int32 for anything that provably fits, Int52 on
// 64-bit for Uint32 when speculated as machine int, otherwise double.
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    // Load the element, sign- or zero-extending narrow elements to 32 bits.
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Elements narrower than 32 bits, and all signed loads, always fit int32.
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }
    
    // Remaining case: a 32-bit unsigned (Uint32) element.
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        // A value with the sign bit set does not fit int32; OSR exit then.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }
    
#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        // Zero-extend so the full unsigned 32-bit value is a valid strict Int52.
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif
    
    // Fall back to double: convert as signed, then add 2^32 when the value
    // was negative to recover the unsigned interpretation.
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2487
// Compile PutByVal into an integer typed array. Var-arg children are:
// 0 = base, 1 = index, 2 = value, 3 = storage; |base| and |property| already
// hold the first two. The value may be a constant or flow in as Int32,
// Int52 (64-bit only), or double; clamped element types (one byte) clamp the
// value to [0, 255] first. For in-bounds array modes an out-of-bounds index
// triggers an OSR exit; otherwise the store is simply skipped.
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;
    
    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            // A non-number constant can never reach here validly; bail out.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        // Fold the clamp/truncation at compile time for constant values.
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
            
#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                // 64-bit variant of compileClampIntegerToByte(): accept
                // 0..255, set 255 when above, zero when negative.
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)
            
        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN converts to 0; handle it explicitly before truncating.
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);
                
                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                
                // Out-of-range doubles take the slow path through toInt32().
                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
                
                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds mode: out-of-bounds is a speculation failure (OSR exit).
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    // Out-of-bounds mode: the store above is jumped over, not exited.
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2623
// Compile GetByVal on a float typed array (child1 = base, child2 = index,
// child3 = storage). Float32 elements are widened to double; the result is
// always a double.
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    doubleResult(resultReg, node);
}
2656
// Compile PutByVal into a float typed array. Var-arg children: 0 = base,
// 1 = index, 2 = value, 3 = storage. Float32 stores narrow the double to
// float first. Bounds handling matches the int path: in-bounds modes OSR
// exit, otherwise the store is skipped.
void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    SpeculateDoubleOperand valueOp(this, valueUse);
    FPRTemporary scratch(this);
    FPRReg valueFPR = valueOp.fpr();
    FPRReg scratchFPR = scratch.fpr();

    ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
    
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }
    
    switch (elementSize(type)) {
    case 4: {
        // NOTE(review): the moveDouble looks redundant — convertDoubleToFloat
        // writes scratchFPR directly from valueFPR. Confirm no port relies on
        // scratch being pre-populated before removing it.
        m_jit.moveDouble(valueFPR, scratchFPR);
        m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
        m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2697
// Emit the prototype-chain walk for instanceof when the value is known to be
// a cell. On return, scratchReg holds the boolean result: a boxed
// true/false JSValue on 64-bit, a 0/1 boolean payload on 32-bit.
// scratch2Reg is clobbered.
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
    
    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);
    
    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    // Keep walking while the loaded prototype is still a cell (64-bit) /
    // non-null (32-bit); otherwise we hit the end of the chain.
#if USE(JSVALUE64)
    m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
    
    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();
    
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
    
    putResult.link(&m_jit);
}
2734
// Compile the InstanceOf node. child1 is the value (cell or untyped),
// child2 is the prototype (always speculated as a cell). Untyped values that
// are not cells produce false without walking the prototype chain.
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.
        
        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);
        
        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();
        
        MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        // Non-cell values are never instances of anything.
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();
        
        isCell.link(&m_jit);
        
        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
        
        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }
    
    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    
    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();
    
    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
2782
// Compile ValueAdd/ArithAdd for the speculated use kinds: Int32 (with
// optional overflow checks and constant-operand fast paths), Int52 on
// 64-bit, and double. Overflow speculation failures OSR exit; the Int32
// paths register a SpeculationRecovery so the OSR exit can undo an add that
// clobbered one of its operands.
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        // Fast path: left operand is a constant immediate.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
        
        // Fast path: right operand is a constant immediate.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);
                
            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
                
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
                
            // If the result aliases an operand, record a recovery so OSR exit
            // can reconstruct the original operand value after the overflow.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)
    
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
2897
// Compile MakeRope: inline-allocate a JSRopeString from two or three
// KnownString children. Flags are the AND of the fibers' flags (so the rope
// is 8-bit only if every fiber is), and the length is the overflow-checked
// sum of the fibers' lengths. Allocation failure and length overflow take
// the operationMakeRope2/3 slow path; length overflow OSR exits.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
    
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    
    // Inline-allocate the rope cell; allocator exhaustion goes to slowPath.
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
        
    // Null value pointer marks the string as a rope; fill the fiber slots and
    // zero the unused ones.
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Accumulate flags in scratchGPR and length in allocatorGPR (reused now
    // that allocation is done).
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        // Length overflow means the rope would exceed the maximum string
        // length; treat it as a speculation failure.
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
        
    cellResult(resultGPR, node);
}
2978
2979 void SpeculativeJIT::compileArithClz32(Node* node)
2980 {
2981     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2982     SpeculateInt32Operand value(this, node->child1());
2983     GPRTemporary result(this, Reuse, value);
2984     GPRReg valueReg = value.gpr();
2985     GPRReg resultReg = result.gpr();
2986     m_jit.countLeadingZeros32(valueReg, resultReg);
2987     int32Result(resultReg, node);
2988 }
2989
// Compile ArithSub for the speculated use kinds: Int32 (with optional
// overflow checks and constant-operand fast paths), Int52 on 64-bit, and
// double. Mirrors compileAdd() above, but subtraction never commutes, so
// there are no operand-aliasing SpeculationRecovery cases here.
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        // Fast path: right operand is a constant immediate.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
            }

            int32Result(result.gpr(), node);
            return;
        }
            
        // Fast path: left operand is a constant immediate.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);
                
            m_jit.move(Imm32(imm1), result.gpr());
            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
                
            int32Result(result.gpr(), node);
            return;
        }
            
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        int32Result(result.gpr(), node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3090
// Emits code for ArithNegate (-child1), dispatching on the operand's proven
// use kind. Every path leaves the negated value as this node's result; the
// integer paths additionally emit OSR speculation checks for overflow and/or
// negative zero, as demanded by node->arithMode().
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.
        
        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // A value whose low 31 bits are all clear is either 0 (whose
            // negation is -0) or INT_MIN (whose negation overflows int32), so
            // this single test exits for both bad cases before negating.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // Fast path: if the abstract interpreter proved the input cannot be a
        // full Int52, negation cannot overflow, so we skip the overflow check
        // and keep whichever Int52 format the operand already has.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                // A zero result means the input was zero, i.e. we produced -0.
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }
        
        // General case: negate and use the 64-bit negate's overflow flag as
        // the Int52 overflow check.
        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            // A zero result means the input was zero, i.e. we produced -0.
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        // Doubles negate exactly (negative zero is representable), so no
        // speculation checks are needed.
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);
        
        m_jit.negateDouble(op1.fpr(), result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
// Emits code for ArithMul (child1 * child2), dispatching on the node's binary
// use kind. The integer paths emit OSR speculation checks for overflow and
// negative zero as demanded by node->arithMode(); the double path needs none.
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }
            
        // Check for negative zero, if the users of this node care about such things.
        // A zero result with either operand negative means the true product is
        // -0, which an int32 cannot represent.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int32Result(result.gpr(), node);
        return;
    }
    
#if USE(JSVALUE64)   
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 12
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.

        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();
        
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
        
        // Same negative-zero rule as the int32 path: a zero result with a
        // negative operand is really -0.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }
        
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        // Double multiply follows IEEE semantics directly (including -0), so
        // no speculation checks are needed.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        
        m_jit.mulDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3281
3282 void SpeculativeJIT::compileArithDiv(Node* node)
3283 {
3284     switch (node->binaryUseKind()) {
3285     case Int32Use: {
3286 #if CPU(X86) || CPU(X86_64)
3287         SpeculateInt32Operand op1(this, node->child1());
3288         SpeculateInt32Operand op2(this, node->child2());
3289         GPRTemporary eax(this, X86Registers::eax);
3290         GPRTemporary edx(this, X86Registers::edx);
3291         GPRReg op1GPR = op1.gpr();
3292         GPRReg op2GPR = op2.gpr();
3293     
3294         GPRReg op2TempGPR;
3295         GPRReg temp;
3296         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3297             op2TempGPR = allocate();
3298             temp = op2TempGPR;
3299         } else {
3300             op2TempGPR = InvalidGPRReg;
3301             if (op1GPR == X86Registers::eax)
3302                 temp = X86Registers::edx;
3303             else
3304                 temp = X86Registers::eax;
3305         }
3306     
3307         ASSERT(temp != op1GPR);
3308         ASSERT(temp != op2GPR);
3309     
3310         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);