speculateRealNumber() should early exit if you're already a real number, not if you...
/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

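// Fast-path inline allocation of a JSArray together with its butterfly storage.
// Slow cases (e.g. allocator exhaustion) are queued on a
// CallArrayAllocatorSlowPathGenerator, which falls back to operationNewArrayWithSize.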
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;
    
    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }
    
    result.merge(RegisterSet::specialRegisters());
    
    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}

void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

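// A SilentRegisterSavePlan records how to spill a live register to its stack slot
// and how to refill it afterwards, so a value can be preserved across an operation
// (such as a call) without disturbing the recorded register allocation state.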
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }
        
    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}

void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }
    
    RELEASE_ASSERT(expectedClassInfo);
    
    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
    
    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    
    SpeculateCellOperand base(this, node->child1());
    
    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }
    
    SpeculateInt32Operand property(this, node->child2());
    
    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }
        
        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }
        
    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);
        
        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);
        
        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();
    
    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
            
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());
            
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();
            
            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
            
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();
        
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
        
    base.use();
    key.use();
        
    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);
        
        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
    
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        
        return true;
    }
    
    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
    
    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);
        
        nonSpeculativePeepholeStrictEq(node, branchNode, invert);
    
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        
        return true;
    }
    
    nonSpeculativeNonPeepholeStrictEq(node, invert);
    
    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
    
    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());
    
    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
    
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isBooleanConstant()) {
        bool imm = node->child1()->asBoolean();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (node->child2()->isBooleanConstant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = node->child2()->asBoolean();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}

void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;
    
    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);
    
    Node* child = node->child1().node();
    noticeOSRBirth(child);
    
    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
}

void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}

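// Emits code for the current basic block: blocks the CFA proved unreachable get a
// trap, the variables live at the block head are replayed into the variable event
// stream, and each node is compiled with the abstract interpreter stepping in lockstep.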
1364 void SpeculativeJIT::compileCurrentBlock()
1365 {
1366     ASSERT(m_compileOkay);
1367     
1368     if (!m_block)
1369         return;
1370     
1371     ASSERT(m_block->isReachable);
1372     
1373     m_jit.blockHeads()[m_block->index] = m_jit.label();
1374
1375     if (!m_block->intersectionOfCFAHasVisited) {
1376         // Don't generate code for basic blocks that are unreachable according to CFA.
1377         // But to be sure that nobody has generated a jump to this block, drop in a
1378         // breakpoint here.
1379         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1380         return;
1381     }
1382
1383     m_stream->appendAndLog(VariableEvent::reset());
1384     
1385     m_jit.jitAssertHasValidCallFrame();
1386     m_jit.jitAssertTagsInPlace();
1387     m_jit.jitAssertArgumentCountSane();
1388
1389     m_state.reset();
1390     m_state.beginBasicBlock(m_block);
1391     
1392     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1393         int operand = m_block->variablesAtHead.operandForIndex(i);
1394         Node* node = m_block->variablesAtHead[i];
1395         if (!node)
1396             continue; // No need to record dead SetLocal's.
1397         
1398         VariableAccessData* variable = node->variableAccessData();
1399         DataFormat format;
1400         if (!node->refCount())
1401             continue; // No need to record dead SetLocal's.
1402         format = dataFormatFor(variable->flushFormat());
1403         m_stream->appendAndLog(
1404             VariableEvent::setLocal(
1405                 VirtualRegister(operand),
1406                 variable->machineLocal(),
1407                 format));
1408     }
1409     
1410     m_codeOriginForExitTarget = CodeOrigin();
1411     m_codeOriginForExitProfile = CodeOrigin();
1412     
1413     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1414         m_currentNode = m_block->at(m_indexInBlock);
1415         
1416         // We may have hit a contradiction that the CFA was aware of but that the JIT
1417         // didn't cause directly.
1418         if (!m_state.isValid()) {
1419             bail(DFGBailedAtTopOfBlock);
1420             return;
1421         }
1422
1423         if (ASSERT_DISABLED)
1424             m_canExit = true; // Essentially disable the assertions.
1425         else
1426             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1427         
1428         m_interpreter.startExecuting();
1429         m_jit.setForNode(m_currentNode);
1430         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1431         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1432         m_lastGeneratedNode = m_currentNode->op();
1433         
1434         ASSERT(m_currentNode->shouldGenerate());
1435         
1436         if (verboseCompilationEnabled()) {
1437             dataLogF(
1438                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1439                 (int)m_currentNode->index(),
1440                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1441             dataLog("\n");
1442         }
1443         
1444         compile(m_currentNode);
1445         
1446         if (belongsInMinifiedGraph(m_currentNode->op()))
1447             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1448         
1449 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1450         m_jit.clearRegisterAllocationOffsets();
1451 #endif
1452         
1453         if (!m_compileOkay) {
1454             bail(DFGBailedAtEndOfNode);
1455             return;
1456         }
1457         
1458         // Make sure that the abstract state is rematerialized for the next node.
1459         m_interpreter.executeEffects(m_indexInBlock);
1460     }
1461     
1462     // Perform the most basic verification that children have been used correctly.
1463     if (!ASSERT_DISABLED) {
1464         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1465             GenerationInfo& info = m_generationInfo[index];
1466             RELEASE_ASSERT(!info.alive());
1467         }
1468     }
1469 }
1470
1471 // If we are making type predictions about our arguments then
1472 // we need to check that they are correct on function entry.
1473 void SpeculativeJIT::checkArgumentTypes()
1474 {
1475     ASSERT(!m_currentNode);
1476     m_isCheckingArgumentTypes = true;
1477     m_codeOriginForExitTarget = CodeOrigin(0);
1478     m_codeOriginForExitProfile = CodeOrigin(0);
1479
1480     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1481         Node* node = m_jit.graph().m_arguments[i];
1482         if (!node) {
1483             // The argument is dead. We don't do any checks for such arguments.
1484             continue;
1485         }
1486         
1487         ASSERT(node->op() == SetArgument);
1488         ASSERT(node->shouldGenerate());
1489
1490         VariableAccessData* variableAccessData = node->variableAccessData();
1491         FlushFormat format = variableAccessData->flushFormat();
1492         
1493         if (format == FlushedJSValue)
1494             continue;
1495         
1496         VirtualRegister virtualRegister = variableAccessData->local();
1497
1498         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1499         
1500 #if USE(JSVALUE64)
1501         switch (format) {
1502         case FlushedInt32: {
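                 // On 64-bit, int32s are boxed as TagTypeNumber | value, so anything below
                 // TagTypeNumber cannot be an int32.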
1503             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1504             break;
1505         }
1506         case FlushedBoolean: {
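                 // XOR with ValueFalse maps false to 0 and true to 1; any other bit left over
                 // means the value was not a boolean.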
1507             GPRTemporary temp(this);
1508             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1509             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1510             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1511             break;
1512         }
1513         case FlushedCell: {
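                 // Cells are the only JSValues with none of the tag-mask bits set, so a nonzero
                 // result under tagMaskRegister means "not a cell".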
1514             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1515             break;
1516         }
1517         default:
1518             RELEASE_ASSERT_NOT_REACHED();
1519             break;
1520         }
1521 #else
1522         switch (format) {
1523         case FlushedInt32: {
1524             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1525             break;
1526         }
1527         case FlushedBoolean: {
1528             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1529             break;
1530         }
1531         case FlushedCell: {
1532             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1533             break;
1534         }
1535         default:
1536             RELEASE_ASSERT_NOT_REACHED();
1537             break;
1538         }
1539 #endif
1540     }
1541     m_isCheckingArgumentTypes = false;
1542 }
1543
1544 bool SpeculativeJIT::compile()
1545 {
1546     checkArgumentTypes();
1547     
1548     ASSERT(!m_currentNode);
1549     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1550         m_jit.setForBlockIndex(blockIndex);
1551         m_block = m_jit.graph().block(blockIndex);
1552         compileCurrentBlock();
1553     }
1554     linkBranches();
1555     return true;
1556 }
1557
1558 void SpeculativeJIT::createOSREntries()
1559 {
1560     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1561         BasicBlock* block = m_jit.graph().block(blockIndex);
1562         if (!block)
1563             continue;
1564         if (!block->isOSRTarget)
1565             continue;
1566         
1567         // Currently we don't have OSR entry trampolines. We could add them
1568         // here if need be.
1569         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1570     }
1571 }
1572
1573 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1574 {
1575     unsigned osrEntryIndex = 0;
1576     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1577         BasicBlock* block = m_jit.graph().block(blockIndex);
1578         if (!block)
1579             continue;
1580         if (!block->isOSRTarget)
1581             continue;
1582         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1583     }
1584     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1585 }
1586
1587 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1588 {
1589     Edge child3 = m_jit.graph().varArgChild(node, 2);
1590     Edge child4 = m_jit.graph().varArgChild(node, 3);
1591
1592     ArrayMode arrayMode = node->arrayMode();
1593     
1594     GPRReg baseReg = base.gpr();
1595     GPRReg propertyReg = property.gpr();
1596     
1597     SpeculateDoubleOperand value(this, child3);
1598
1599     FPRReg valueReg = value.fpr();
1600     
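         // The value being stored must be a real (non-NaN) number: double arrays use NaN to
         // represent holes, so letting a genuine NaN in would make the element read back as
         // missing.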
1601     DFG_TYPE_CHECK(
1602         JSValueRegs(), child3, SpecFullRealNumber,
1603         m_jit.branchDouble(
1604             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1605     
1606     if (!m_compileOkay)
1607         return;
1608     
1609     StorageOperand storage(this, child4);
1610     GPRReg storageReg = storage.gpr();
1611
1612     if (node->op() == PutByValAlias) {
1613         // Store the value to the array.
1614         GPRReg propertyReg = property.gpr();
1615         FPRReg valueReg = value.fpr();
1616         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1617         
1618         noResult(m_currentNode);
1619         return;
1620     }
1621     
1622     GPRTemporary temporary;
1623     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1624
1625     MacroAssembler::Jump slowCase;
1626     
1627     if (arrayMode.isInBounds()) {
1628         speculationCheck(
1629             OutOfBounds, JSValueRegs(), 0,
1630             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1631     } else {
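             // The store may land past the public length. Within the vector length we simply
             // store and bump the public length ourselves; past the vector length we either OSR
             // exit (when this array mode doesn't tolerate out-of-bounds stores) or defer to the
             // out-of-bounds slow path registered below.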
1632         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1633         
1634         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1635         
1636         if (!arrayMode.isOutOfBounds())
1637             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1638         
1639         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1640         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1641         
1642         inBounds.link(&m_jit);
1643     }
1644     
1645     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1646
1647     base.use();
1648     property.use();
1649     value.use();
1650     storage.use();
1651     
1652     if (arrayMode.isOutOfBounds()) {
1653         addSlowPathGenerator(
1654             slowPathCall(
1655                 slowCase, this,
1656                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1657                 NoResult, baseReg, propertyReg, valueReg));
1658     }
1659
1660     noResult(m_currentNode, UseChildrenCalledExplicitly);
1661 }
1662
1663 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1664 {
1665     SpeculateCellOperand string(this, node->child1());
1666     SpeculateStrictInt32Operand index(this, node->child2());
1667     StorageOperand storage(this, node->child3());
1668
1669     GPRReg stringReg = string.gpr();
1670     GPRReg indexReg = index.gpr();
1671     GPRReg storageReg = storage.gpr();
1672     
1673     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1674
1675     // unsigned comparison so we can filter out negative indices and indices that are too large
1676     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1677
1678     GPRTemporary scratch(this);
1679     GPRReg scratchReg = scratch.gpr();
1680
1681     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1682
1683     // Load the character into scratchReg
1684     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1685
1686     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1687     JITCompiler::Jump cont8Bit = m_jit.jump();
1688
1689     is16Bit.link(&m_jit);
1690
1691     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1692
1693     cont8Bit.link(&m_jit);
1694
1695     int32Result(scratchReg, m_currentNode);
1696 }
1697
1698 void SpeculativeJIT::compileGetByValOnString(Node* node)
1699 {
1700     SpeculateCellOperand base(this, node->child1());
1701     SpeculateStrictInt32Operand property(this, node->child2());
1702     StorageOperand storage(this, node->child3());
1703     GPRReg baseReg = base.gpr();
1704     GPRReg propertyReg = property.gpr();
1705     GPRReg storageReg = storage.gpr();
1706
1707     GPRTemporary scratch(this);
1708     GPRReg scratchReg = scratch.gpr();
1709 #if USE(JSVALUE32_64)
1710     GPRTemporary resultTag;
1711     GPRReg resultTagReg = InvalidGPRReg;
1712     if (node->arrayMode().isOutOfBounds()) {
1713         GPRTemporary realResultTag(this);
1714         resultTag.adopt(realResultTag);
1715         resultTagReg = resultTag.gpr();
1716     }
1717 #endif
1718
1719     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1720
1721     // unsigned comparison so we can filter out negative indices and indices that are too large
1722     JITCompiler::Jump outOfBounds = m_jit.branch32(
1723         MacroAssembler::AboveOrEqual, propertyReg,
1724         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1725     if (node->arrayMode().isInBounds())
1726         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1727
1728     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1729
1730     // Load the character into scratchReg
1731     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1732
1733     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1734     JITCompiler::Jump cont8Bit = m_jit.jump();
1735
1736     is16Bit.link(&m_jit);
1737
1738     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1739
1740     JITCompiler::Jump bigCharacter =
1741         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1742
1743     // 8-bit string values don't need the isASCII check.
1744     cont8Bit.link(&m_jit);
1745
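         // Fast path: index into the VM's single-character string table (scaled by pointer size)
         // and load the interned JSString*. Character codes >= 0x100 were sent to the
         // bigCharacter slow path above.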
1746     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1747     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1748     m_jit.loadPtr(scratchReg, scratchReg);
1749
1750     addSlowPathGenerator(
1751         slowPathCall(
1752             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1753
1754     if (node->arrayMode().isOutOfBounds()) {
1755 #if USE(JSVALUE32_64)
1756         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1757 #endif
1758
1759         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1760         if (globalObject->stringPrototypeChainIsSane()) {
1761             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1762             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1763             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1764             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1765             // indexed properties either.
1766             // https://bugs.webkit.org/show_bug.cgi?id=144668
1767             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1768             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1769             
1770 #if USE(JSVALUE64)
1771             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1772                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1773 #else
1774             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1775                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1776                 baseReg, propertyReg));
1777 #endif
1778         } else {
1779 #if USE(JSVALUE64)
1780             addSlowPathGenerator(
1781                 slowPathCall(
1782                     outOfBounds, this, operationGetByValStringInt,
1783                     scratchReg, baseReg, propertyReg));
1784 #else
1785             addSlowPathGenerator(
1786                 slowPathCall(
1787                     outOfBounds, this, operationGetByValStringInt,
1788                     resultTagReg, scratchReg, baseReg, propertyReg));
1789 #endif
1790         }
1791         
1792 #if USE(JSVALUE64)
1793         jsValueResult(scratchReg, m_currentNode);
1794 #else
1795         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1796 #endif
1797     } else
1798         cellResult(scratchReg, m_currentNode);
1799 }
1800
1801 void SpeculativeJIT::compileFromCharCode(Node* node)
1802 {
1803     SpeculateStrictInt32Operand property(this, node->child1());
1804     GPRReg propertyReg = property.gpr();
1805     GPRTemporary smallStrings(this);
1806     GPRTemporary scratch(this);
1807     GPRReg scratchReg = scratch.gpr();
1808     GPRReg smallStringsReg = smallStrings.gpr();
1809
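         // Character codes that fit in the single-character string table are looked up directly;
         // a null table entry (the small string presumably hasn't been materialized yet) or a
         // char code that is too large falls back to operationStringFromCharCode.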
1810     JITCompiler::JumpList slowCases;
1811     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1812     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1813     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1814
1815     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1816     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1817     cellResult(scratchReg, m_currentNode);
1818 }
1819
1820 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1821 {
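         // Decide how ValueToInt32 should consume its operand based on the format it currently
         // occupies: already an int32, a generic JSValue that still needs checking, or a format
         // (boolean, cell) that contradicts the speculation, in which case speculative execution
         // is terminated.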
1822     VirtualRegister virtualRegister = node->virtualRegister();
1823     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1824
1825     switch (info.registerFormat()) {
1826     case DataFormatStorage:
1827         RELEASE_ASSERT_NOT_REACHED();
1828
1829     case DataFormatBoolean:
1830     case DataFormatCell:
1831         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1832         return GeneratedOperandTypeUnknown;
1833
1834     case DataFormatNone:
1835     case DataFormatJSCell:
1836     case DataFormatJS:
1837     case DataFormatJSBoolean:
1838     case DataFormatJSDouble:
1839         return GeneratedOperandJSValue;
1840
1841     case DataFormatJSInt32:
1842     case DataFormatInt32:
1843         return GeneratedOperandInteger;
1844
1845     default:
1846         RELEASE_ASSERT_NOT_REACHED();
1847         return GeneratedOperandTypeUnknown;
1848     }
1849 }
1850
1851 void SpeculativeJIT::compileValueToInt32(Node* node)
1852 {
1853     switch (node->child1().useKind()) {
1854 #if USE(JSVALUE64)
1855     case Int52RepUse: {
1856         SpeculateStrictInt52Operand op1(this, node->child1());
1857         GPRTemporary result(this, Reuse, op1);
1858         GPRReg op1GPR = op1.gpr();
1859         GPRReg resultGPR = result.gpr();
1860         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1861         int32Result(resultGPR, node, DataFormatInt32);
1862         return;
1863     }
1864 #endif // USE(JSVALUE64)
1865         
1866     case DoubleRepUse: {
1867         GPRTemporary result(this);
1868         SpeculateDoubleOperand op1(this, node->child1());
1869         FPRReg fpr = op1.fpr();
1870         GPRReg gpr = result.gpr();
1871         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1872         
1873         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1874         
1875         int32Result(gpr, node);
1876         return;
1877     }
1878     
1879     case NumberUse:
1880     case NotCellUse: {
1881         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1882         case GeneratedOperandInteger: {
1883             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1884             GPRTemporary result(this, Reuse, op1);
1885             m_jit.move(op1.gpr(), result.gpr());
1886             int32Result(result.gpr(), node, op1.format());
1887             return;
1888         }
1889         case GeneratedOperandJSValue: {
1890             GPRTemporary result(this);
1891 #if USE(JSVALUE64)
1892             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1893
1894             GPRReg gpr = op1.gpr();
1895             GPRReg resultGpr = result.gpr();
1896             FPRTemporary tempFpr(this);
1897             FPRReg fpr = tempFpr.fpr();
1898
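                 // Boxed int32s are TagTypeNumber | value, so an operand that is unsigned
                 // greater-or-equal to TagTypeNumber is already an int32.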
1899             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1900             JITCompiler::JumpList converted;
1901
1902             if (node->child1().useKind() == NumberUse) {
1903                 DFG_TYPE_CHECK(
1904                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1905                     m_jit.branchTest64(
1906                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1907             } else {
1908                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1909                 
1910                 DFG_TYPE_CHECK(
1911                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
1912                 
1913                 // It's not a cell, so true turns into 1 and all else turns into 0.
1914                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1915                 converted.append(m_jit.jump());
1916                 
1917                 isNumber.link(&m_jit);
1918             }
1919
1920             // First, if we get here, we have a double encoded as a JSValue.
1921             m_jit.move(gpr, resultGpr);
1922             unboxDouble(resultGpr, fpr);
1923
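                 // Doubles are converted by calling the C toInt32() helper, silently spilling
                 // and refilling live registers around the call.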
1924             silentSpillAllRegisters(resultGpr);
1925             callOperation(toInt32, resultGpr, fpr);
1926             silentFillAllRegisters(resultGpr);
1927
1928             converted.append(m_jit.jump());
1929
1930             isInteger.link(&m_jit);
1931             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1932
1933             converted.link(&m_jit);
1934 #else
1935             Node* childNode = node->child1().node();
1936             VirtualRegister virtualRegister = childNode->virtualRegister();
1937             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1938
1939             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1940
1941             GPRReg payloadGPR = op1.payloadGPR();
1942             GPRReg resultGpr = result.gpr();
1943         
1944             JITCompiler::JumpList converted;
1945
1946             if (info.registerFormat() == DataFormatJSInt32)
1947                 m_jit.move(payloadGPR, resultGpr);
1948             else {
1949                 GPRReg tagGPR = op1.tagGPR();
1950                 FPRTemporary tempFpr(this);
1951                 FPRReg fpr = tempFpr.fpr();
1952                 FPRTemporary scratch(this);
1953
1954                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1955
1956                 if (node->child1().useKind() == NumberUse) {
1957                     DFG_TYPE_CHECK(
1958                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1959                         m_jit.branch32(
1960                             MacroAssembler::AboveOrEqual, tagGPR,
1961                             TrustedImm32(JSValue::LowestTag)));
1962                 } else {
1963                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1964                     
1965                     DFG_TYPE_CHECK(
1966                         op1.jsValueRegs(), node->child1(), ~SpecCell,
1967                         m_jit.branchIfCell(op1.jsValueRegs()));
1968                     
1969                     // It's not a cell, so true turns into 1 and all else turns into 0.
1970                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1971                     m_jit.move(TrustedImm32(0), resultGpr);
1972                     converted.append(m_jit.jump());
1973                     
1974                     isBoolean.link(&m_jit);
1975                     m_jit.move(payloadGPR, resultGpr);
1976                     converted.append(m_jit.jump());
1977                     
1978                     isNumber.link(&m_jit);
1979                 }
1980
1981                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1982
1983                 silentSpillAllRegisters(resultGpr);
1984                 callOperation(toInt32, resultGpr, fpr);
1985                 silentFillAllRegisters(resultGpr);
1986
1987                 converted.append(m_jit.jump());
1988
1989                 isInteger.link(&m_jit);
1990                 m_jit.move(payloadGPR, resultGpr);
1991
1992                 converted.link(&m_jit);
1993             }
1994 #endif
1995             int32Result(resultGpr, node);
1996             return;
1997         }
1998         case GeneratedOperandTypeUnknown:
1999             RELEASE_ASSERT(!m_compileOkay);
2000             return;
2001         }
2002         RELEASE_ASSERT_NOT_REACHED();
2003         return;
2004     }
2005     
2006     default:
2007         ASSERT(!m_compileOkay);
2008         return;
2009     }
2010 }
2011
2012 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2013 {
2014     if (doesOverflow(node->arithMode())) {
2015         // We know that this sometimes produces doubles, so produce a double every
2016         // time. This at least allows subsequent code to avoid weird conditionals.
2017             
2018         SpeculateInt32Operand op1(this, node->child1());
2019         FPRTemporary result(this);
2020             
2021         GPRReg inputGPR = op1.gpr();
2022         FPRReg outputFPR = result.fpr();
2023             
2024         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2025             
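             // The input bits are really an unsigned 32-bit value, but convertInt32ToDouble
             // treated them as signed; if the sign bit was set, fix up the result by adding 2^32.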
2026         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2027         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2028         positive.link(&m_jit);
2029             
2030         doubleResult(outputFPR, node);
2031         return;
2032     }
2033     
2034     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2035
2036     SpeculateInt32Operand op1(this, node->child1());
2037     GPRTemporary result(this);
2038
2039     m_jit.move(op1.gpr(), result.gpr());
2040
2041     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2042
2043     int32Result(result.gpr(), node, op1.format());
2044 }
2045
2046 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2047 {
2048     SpeculateDoubleOperand op1(this, node->child1());
2049     FPRTemporary scratch(this);
2050     GPRTemporary result(this);
2051     
2052     FPRReg valueFPR = op1.fpr();
2053     FPRReg scratchFPR = scratch.fpr();
2054     GPRReg resultGPR = result.gpr();
2055
2056     JITCompiler::JumpList failureCases;
2057     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2058     m_jit.branchConvertDoubleToInt32(
2059         valueFPR, resultGPR, failureCases, scratchFPR,
2060         shouldCheckNegativeZero(node->arithMode()));
2061     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2062
2063     int32Result(resultGPR, node);
2064 }
2065
2066 void SpeculativeJIT::compileDoubleRep(Node* node)
2067 {
2068     switch (node->child1().useKind()) {
2069     case RealNumberUse: {
2070         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2071         FPRTemporary result(this);
2072         
2073         JSValueRegs op1Regs = op1.jsValueRegs();
2074         FPRReg resultFPR = result.fpr();
2075         
2076 #if USE(JSVALUE64)
2077         GPRTemporary temp(this);
2078         GPRReg tempGPR = temp.gpr();
2079         m_jit.move(op1Regs.gpr(), tempGPR);
2080         m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
2081 #else
2082         FPRTemporary temp(this);
2083         FPRReg tempFPR = temp.fpr();
2084         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2085 #endif
2086         
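             // Unboxing a JSValue that was not a boxed double yields a NaN bit pattern. So if
             // the unboxed value compares equal to itself it really was a (non-NaN) double and
             // we are done; otherwise the only other thing a real number can be is an int32,
             // which is type-checked and converted below.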
2087         JITCompiler::Jump done = m_jit.branchDouble(
2088             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2089         
2090         DFG_TYPE_CHECK(
2091             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2092         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2093         
2094         done.link(&m_jit);
2095         
2096         doubleResult(resultFPR, node);
2097         return;
2098     }
2099     
2100     case NotCellUse:
2101     case NumberUse: {
2102         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2103
2104         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2105         if (isInt32Speculation(possibleTypes)) {
2106             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2107             FPRTemporary result(this);
2108             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2109             doubleResult(result.fpr(), node);
2110             return;
2111         }
2112
2113         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2114         FPRTemporary result(this);
2115
2116 #if USE(JSVALUE64)
2117         GPRTemporary temp(this);
2118
2119         GPRReg op1GPR = op1.gpr();
2120         GPRReg tempGPR = temp.gpr();
2121         FPRReg resultFPR = result.fpr();
2122         JITCompiler::JumpList done;
2123
2124         JITCompiler::Jump isInteger = m_jit.branch64(
2125             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2126
2127         if (node->child1().useKind() == NotCellUse) {
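                 // For NotCellUse the remaining non-numeric primitives are converted by hand,
                 // following ToNumber: null and false become 0, true becomes 1, and undefined
                 // becomes NaN. Anything that turns out to be a cell fails the ~SpecCell type
                 // check below.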
2128             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2129             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2130
2131             static const double zero = 0;
2132             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2133
2134             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2135             done.append(isNull);
2136
2137             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2138                 m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));
2139
2140             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2141             static const double one = 1;
2142             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2143             done.append(isFalse);
                 done.append(m_jit.jump()); // true: result is already 1; skip the NaN path below.
2144
2145             isUndefined.link(&m_jit);
2146             static const double NaN = PNaN;
2147             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2148             done.append(m_jit.jump());
2149
2150             isNumber.link(&m_jit);
2151         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2152             typeCheck(
2153                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2154                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2155         }
2156     
2157         m_jit.move(op1GPR, tempGPR);
2158         unboxDouble(tempGPR, resultFPR);
2159         done.append(m_jit.jump());
2160     
2161         isInteger.link(&m_jit);
2162         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2163         done.link(&m_jit);
2164 #else // USE(JSVALUE64) -> this is the 32_64 case
2165         FPRTemporary temp(this);
2166     
2167         GPRReg op1TagGPR = op1.tagGPR();
2168         GPRReg op1PayloadGPR = op1.payloadGPR();
2169         FPRReg tempFPR = temp.fpr();
2170         FPRReg resultFPR = result.fpr();
2171         JITCompiler::JumpList done;
2172     
2173         JITCompiler::Jump isInteger = m_jit.branch32(
2174             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2175
2176         if (node->child1().useKind() == NotCellUse) {
2177             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2178             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2179
2180             static const double zero = 0;
2181             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2182
2183             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2184             done.append(isNull);
2185
2186             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2187
2188             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2189             static const double one = 1;
2190             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2191             done.append(isFalse);
                 done.append(m_jit.jump()); // true: result is already 1; skip the NaN path below.
2192
2193             isUndefined.link(&m_jit);
2194             static const double NaN = PNaN;
2195             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2196             done.append(m_jit.jump());
2197
2198             isNumber.link(&m_jit);
2199         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2200             typeCheck(
2201                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2202                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2203         }
2204
2205         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2206         done.append(m_jit.jump());
2207     
2208         isInteger.link(&m_jit);
2209         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2210         done.link(&m_jit);
2211 #endif // USE(JSVALUE64)
2212     
2213         doubleResult(resultFPR, node);
2214         return;
2215     }
2216         
2217 #if USE(JSVALUE64)
2218     case Int52RepUse: {
2219         SpeculateStrictInt52Operand value(this, node->child1());
2220         FPRTemporary result(this);
2221         
2222         GPRReg valueGPR = value.gpr();
2223         FPRReg resultFPR = result.fpr();
2224
2225         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2226         
2227         doubleResult(resultFPR, node);
2228         return;
2229     }
2230 #endif // USE(JSVALUE64)
2231         
2232     default:
2233         RELEASE_ASSERT_NOT_REACHED();
2234         return;
2235     }
2236 }
2237
2238 void SpeculativeJIT::compileValueRep(Node* node)
2239 {
2240     switch (node->child1().useKind()) {
2241     case DoubleRepUse: {
2242         SpeculateDoubleOperand value(this, node->child1());
2243         JSValueRegsTemporary result(this);
2244         
2245         FPRReg valueFPR = value.fpr();
2246         JSValueRegs resultRegs = result.regs();
2247         
2248         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2249         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2250         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2251         // local was purified.
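             // We do, however, purify here: boxing an impure NaN unchanged could produce a bit
             // pattern that aliases some other JSValue, so any value that might be an impure NaN
             // is canonicalized to the pure NaN before boxing.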
2252         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2253             m_jit.purifyNaN(valueFPR);
2254
2255         boxDouble(valueFPR, resultRegs);
2256         
2257         jsValueResult(resultRegs, node);
2258         return;
2259     }
2260         
2261 #if USE(JSVALUE64)
2262     case Int52RepUse: {
2263         SpeculateStrictInt52Operand value(this, node->child1());
2264         GPRTemporary result(this);
2265         
2266         GPRReg valueGPR = value.gpr();
2267         GPRReg resultGPR = result.gpr();
2268         
2269         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2270         
2271         jsValueResult(resultGPR, node);
2272         return;
2273     }
2274 #endif // USE(JSVALUE64)
2275         
2276     default:
2277         RELEASE_ASSERT_NOT_REACHED();
2278         return;
2279     }
2280 }
2281
2282 static double clampDoubleToByte(double d)
2283 {
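         // Clamp for Uint8Clamped storage: add 0.5 and let the caller's truncation to an integer
         // do the rounding, forcing the result into [0, 255]. NaN fails the (d > 0) test and
         // clamps to 0.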
2284     d += 0.5;
2285     if (!(d > 0))
2286         d = 0;
2287     else if (d > 255)
2288         d = 255;
2289     return d;
2290 }
2291
2292 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2293 {
2294     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2295     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2296     jit.xorPtr(result, result);
2297     MacroAssembler::Jump clamped = jit.jump();
2298     tooBig.link(&jit);
2299     jit.move(JITCompiler::TrustedImm32(255), result);
2300     clamped.link(&jit);
2301     inBounds.link(&jit);
2302 }
2303
2304 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2305 {
2306     // Unordered compare so we pick up NaN
2307     static const double zero = 0;
2308     static const double byteMax = 255;
2309     static const double half = 0.5;
2310     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2311     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2312     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2313     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2314     
2315     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2316     // FIXME: This should probably just use a floating point round!
2317     // https://bugs.webkit.org/show_bug.cgi?id=72054
2318     jit.addDouble(source, scratch);
2319     jit.truncateDoubleToInt32(scratch, result);   
2320     MacroAssembler::Jump truncatedInt = jit.jump();
2321     
2322     tooSmall.link(&jit);
2323     jit.xorPtr(result, result);
2324     MacroAssembler::Jump zeroed = jit.jump();
2325     
2326     tooBig.link(&jit);
2327     jit.move(JITCompiler::TrustedImm32(255), result);
2328     
2329     truncatedInt.link(&jit);
2330     zeroed.link(&jit);
2331
2332 }
2333
2334 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2335 {
2336     if (node->op() == PutByValAlias)
2337         return JITCompiler::Jump();
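         // PutByValAlias means an earlier access already proved this index in bounds, so no
         // check is needed. If the typed array itself is a compile-time constant we can fold its
         // length: a provably in-bounds constant index needs no check at all, and otherwise we
         // compare against the known length rather than reloading it from the view.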
2338     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2339         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2340     if (view) {
2341         uint32_t length = view->length();
2342         Node* indexNode = m_jit.graph().child(node, 1).node();
2343         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2344             return JITCompiler::Jump();
2345         return m_jit.branch32(
2346             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2347     }
2348     return m_jit.branch32(
2349         MacroAssembler::AboveOrEqual, indexGPR,
2350         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2351 }
2352
2353 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2354 {
2355     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2356     if (!jump.isSet())
2357         return;
2358     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2359 }
2360
2361 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2362 {
2363     ASSERT(isInt(type));
2364     
2365     SpeculateCellOperand base(this, node->child1());
2366     SpeculateStrictInt32Operand property(this, node->child2());
2367     StorageOperand storage(this, node->child3());
2368
2369     GPRReg baseReg = base.gpr();
2370     GPRReg propertyReg = property.gpr();
2371     GPRReg storageReg = storage.gpr();
2372
2373     GPRTemporary result(this);
2374     GPRReg resultReg = result.gpr();
2375
2376     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2377
2378     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2379     switch (elementSize(type)) {
2380     case 1:
2381         if (isSigned(type))
2382             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2383         else
2384             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2385         break;
2386     case 2:
2387         if (isSigned(type))
2388             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2389         else
2390             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2391         break;
2392     case 4:
2393         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2394         break;
2395     default:
2396         CRASH();
2397     }
2398     if (elementSize(type) < 4 || isSigned(type)) {
2399         int32Result(resultReg, node);
2400         return;
2401     }
2402     
2403     ASSERT(elementSize(type) == 4 && !isSigned(type));
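         // A uint32 element may not fit in an int32. Prefer to speculate that it does (checking
         // the sign bit), then fall back to Int52 on 64-bit, and finally produce a double,
         // adding 2^32 when the top bit was set.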
2404     if (node->shouldSpeculateInt32()) {
2405         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2406         int32Result(resultReg, node);
2407         return;
2408     }
2409     
2410 #if USE(JSVALUE64)
2411     if (node->shouldSpeculateMachineInt()) {
2412         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2413         strictInt52Result(resultReg, node);
2414         return;
2415     }
2416 #endif
2417     
2418     FPRTemporary fresult(this);
2419     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2420     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2421     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2422     positive.link(&m_jit);
2423     doubleResult(fresult.fpr(), node);
2424 }
2425
2426 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2427 {
2428     ASSERT(isInt(type));
2429     
2430     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2431     GPRReg storageReg = storage.gpr();
2432     
2433     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2434     
2435     GPRTemporary value;
2436     GPRReg valueGPR = InvalidGPRReg;
2437     
2438     if (valueUse->isConstant()) {
2439         JSValue jsValue = valueUse->asJSValue();
2440         if (!jsValue.isNumber()) {
2441             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2442             noResult(node);
2443             return;
2444         }
2445         double d = jsValue.asNumber();
2446         if (isClamped(type)) {
2447             ASSERT(elementSize(type) == 1);
2448             d = clampDoubleToByte(d);
2449         }
2450         GPRTemporary scratch(this);
2451         GPRReg scratchReg = scratch.gpr();
2452         m_jit.move(Imm32(toInt32(d)), scratchReg);
2453         value.adopt(scratch);
2454         valueGPR = scratchReg;
2455     } else {
2456         switch (valueUse.useKind()) {
2457         case Int32Use: {
2458             SpeculateInt32Operand valueOp(this, valueUse);
2459             GPRTemporary scratch(this);
2460             GPRReg scratchReg = scratch.gpr();
2461             m_jit.move(valueOp.gpr(), scratchReg);
2462             if (isClamped(type)) {
2463                 ASSERT(elementSize(type) == 1);
2464                 compileClampIntegerToByte(m_jit, scratchReg);
2465             }
2466             value.adopt(scratch);
2467             valueGPR = scratchReg;
2468             break;
2469         }
2470             
2471 #if USE(JSVALUE64)
2472         case Int52RepUse: {
2473             SpeculateStrictInt52Operand valueOp(this, valueUse);
2474             GPRTemporary scratch(this);
2475             GPRReg scratchReg = scratch.gpr();
2476             m_jit.move(valueOp.gpr(), scratchReg);
2477             if (isClamped(type)) {
2478                 ASSERT(elementSize(type) == 1);
2479                 MacroAssembler::Jump inBounds = m_jit.branch64(
2480                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2481                 MacroAssembler::Jump tooBig = m_jit.branch64(
2482                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2483                 m_jit.move(TrustedImm32(0), scratchReg);
2484                 MacroAssembler::Jump clamped = m_jit.jump();
2485                 tooBig.link(&m_jit);
2486                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2487                 clamped.link(&m_jit);
2488                 inBounds.link(&m_jit);
2489             }
2490             value.adopt(scratch);
2491             valueGPR = scratchReg;
2492             break;
2493         }
2494 #endif // USE(JSVALUE64)
2495             
2496         case DoubleRepUse: {
2497             if (isClamped(type)) {
2498                 ASSERT(elementSize(type) == 1);
2499                 SpeculateDoubleOperand valueOp(this, valueUse);
2500                 GPRTemporary result(this);
2501                 FPRTemporary floatScratch(this);
2502                 FPRReg fpr = valueOp.fpr();
2503                 GPRReg gpr = result.gpr();
2504                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2505                 value.adopt(result);
2506                 valueGPR = gpr;
2507             } else {
2508                 SpeculateDoubleOperand valueOp(this, valueUse);
2509                 GPRTemporary result(this);
2510                 FPRReg fpr = valueOp.fpr();
2511                 GPRReg gpr = result.gpr();
2512                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2513                 m_jit.xorPtr(gpr, gpr);
2514                 MacroAssembler::Jump fixed = m_jit.jump();
2515                 notNaN.link(&m_jit);
2516                 
2517                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2518                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2519                 
2520                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2521                 
2522                 fixed.link(&m_jit);
2523                 value.adopt(result);
2524                 valueGPR = gpr;
2525             }
2526             break;
2527         }
2528             
2529         default:
2530             RELEASE_ASSERT_NOT_REACHED();
2531             break;
2532         }
2533     }
2534     
2535     ASSERT_UNUSED(valueGPR, valueGPR != property);
2536     ASSERT(valueGPR != base);
2537     ASSERT(valueGPR != storageReg);
2538     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2539     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2540         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2541         outOfBounds = MacroAssembler::Jump();
2542     }
2543
2544     switch (elementSize(type)) {
2545     case 1:
2546         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2547         break;
2548     case 2:
2549         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2550         break;
2551     case 4:
2552         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2553         break;
2554     default:
2555         CRASH();
2556     }
2557     if (outOfBounds.isSet())
2558         outOfBounds.link(&m_jit);
2559     noResult(node);
2560 }
2561
2562 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2563 {
2564     ASSERT(isFloat(type));
2565     
2566     SpeculateCellOperand base(this, node->child1());
2567     SpeculateStrictInt32Operand property(this, node->child2());
2568     StorageOperand storage(this, node->child3());
2569
2570     GPRReg baseReg = base.gpr();
2571     GPRReg propertyReg = property.gpr();
2572     GPRReg storageReg = storage.gpr();
2573
2574     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2575
2576     FPRTemporary result(this);
2577     FPRReg resultReg = result.fpr();
2578     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2579     switch (elementSize(type)) {
2580     case 4:
2581         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2582         m_jit.convertFloatToDouble(resultReg, resultReg);
2583         break;
2584     case 8: {
2585         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2586         break;
2587     }
2588     default:
2589         RELEASE_ASSERT_NOT_REACHED();
2590     }
2591     
2592     doubleResult(resultReg, node);
2593 }
2594
2595 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2596 {
2597     ASSERT(isFloat(type));
2598     
2599     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2600     GPRReg storageReg = storage.gpr();
2601     
2602     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2603     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2604
2605     SpeculateDoubleOperand valueOp(this, valueUse);
2606     FPRTemporary scratch(this);
2607     FPRReg valueFPR = valueOp.fpr();
2608     FPRReg scratchFPR = scratch.fpr();
2609
2610     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2611     
2612     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2613     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2614         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2615         outOfBounds = MacroAssembler::Jump();
2616     }
2617     
2618     switch (elementSize(type)) {
2619     case 4: {
2620         m_jit.moveDouble(valueFPR, scratchFPR);
2621         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2622         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2623         break;
2624     }
2625     case 8:
2626         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2627         break;
2628     default:
2629         RELEASE_ASSERT_NOT_REACHED();
2630     }
2631     if (outOfBounds.isSet())
2632         outOfBounds.link(&m_jit);
2633     noResult(node);
2634 }
2635
2636 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2637 {
2638     // Check that prototype is an object.
2639     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2640     
2641     // Initialize scratchReg with the value being checked.
2642     m_jit.move(valueReg, scratchReg);
2643     
2644     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2645     MacroAssembler::Label loop(&m_jit);
2646     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2647     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2648     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2649 #if USE(JSVALUE64)
2650     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2651 #else
2652     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2653 #endif
2654     
2655     // No match - result is false.
2656 #if USE(JSVALUE64)
2657     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2658 #else
2659     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2660 #endif
2661     MacroAssembler::Jump putResult = m_jit.jump();
2662     
2663     isInstance.link(&m_jit);
2664 #if USE(JSVALUE64)
2665     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2666 #else
2667     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2668 #endif
2669     
2670     putResult.link(&m_jit);
2671 }
2672
2673 void SpeculativeJIT::compileInstanceOf(Node* node)
2674 {
2675     if (node->child1().useKind() == UntypedUse) {
2676         // It might not be a cell. Speculate less aggressively.
2677         // Or: it might only be used once (i.e. by us), so we get zero benefit
2678         // from speculating any more aggressively than we absolutely need to.
2679         
2680         JSValueOperand value(this, node->child1());
2681         SpeculateCellOperand prototype(this, node->child2());
2682         GPRTemporary scratch(this);
2683         GPRTemporary scratch2(this);
2684         
2685         GPRReg prototypeReg = prototype.gpr();
2686         GPRReg scratchReg = scratch.gpr();
2687         GPRReg scratch2Reg = scratch2.gpr();
2688         
2689         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2690         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2691         moveFalseTo(scratchReg);
2692
2693         MacroAssembler::Jump done = m_jit.jump();
2694         
2695         isCell.link(&m_jit);
2696         
2697         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2698         
2699         done.link(&m_jit);
2700
2701         blessedBooleanResult(scratchReg, node);
2702         return;
2703     }
2704     
2705     SpeculateCellOperand value(this, node->child1());
2706     SpeculateCellOperand prototype(this, node->child2());
2707     
2708     GPRTemporary scratch(this);
2709     GPRTemporary scratch2(this);
2710     
2711     GPRReg valueReg = value.gpr();
2712     GPRReg prototypeReg = prototype.gpr();
2713     GPRReg scratchReg = scratch.gpr();
2714     GPRReg scratch2Reg = scratch2.gpr();
2715     
2716     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2717
2718     blessedBooleanResult(scratchReg, node);
2719 }
2720
2721 void SpeculativeJIT::compileAdd(Node* node)
2722 {
2723     switch (node->binaryUseKind()) {
2724     case Int32Use: {
2725         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2726         
2727         if (node->child1()->isInt32Constant()) {
2728             int32_t imm1 = node->child1()->asInt32();
2729             SpeculateInt32Operand op2(this, node->child2());
2730             GPRTemporary result(this);
2731
2732             if (!shouldCheckOverflow(node->arithMode())) {
2733                 m_jit.move(op2.gpr(), result.gpr());
2734                 m_jit.add32(Imm32(imm1), result.gpr());
2735             } else
2736                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2737
2738             int32Result(result.gpr(), node);
2739             return;
2740         }
2741         
2742         if (node->child2()->isInt32Constant()) {
2743             SpeculateInt32Operand op1(this, node->child1());
2744             int32_t imm2 = node->child2()->asInt32();
2745             GPRTemporary result(this);
2746                 
2747             if (!shouldCheckOverflow(node->arithMode())) {
2748                 m_jit.move(op1.gpr(), result.gpr());
2749                 m_jit.add32(Imm32(imm2), result.gpr());
2750             } else
2751                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2752
2753             int32Result(result.gpr(), node);
2754             return;
2755         }
2756                 
2757         SpeculateInt32Operand op1(this, node->child1());
2758         SpeculateInt32Operand op2(this, node->child2());
2759         GPRTemporary result(this, Reuse, op1, op2);
2760
2761         GPRReg gpr1 = op1.gpr();
2762         GPRReg gpr2 = op2.gpr();
2763         GPRReg gprResult = result.gpr();
2764
2765         if (!shouldCheckOverflow(node->arithMode())) {
2766             if (gpr1 == gprResult)
2767                 m_jit.add32(gpr2, gprResult);
2768             else {
2769                 m_jit.move(gpr2, gprResult);
2770                 m_jit.add32(gpr1, gprResult);
2771             }
2772         } else {
2773             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2774                 
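                 // If the add clobbered one of its operands (when gprResult aliases
                 // gpr1 or gpr2), record a recovery so that an overflow OSR exit can
                 // reconstruct the original operand by subtracting the other operand
                 // from the wrapped result.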
2775             if (gpr1 == gprResult)
2776                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2777             else if (gpr2 == gprResult)
2778                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2779             else
2780                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2781         }
2782
2783         int32Result(gprResult, node);
2784         return;
2785     }
2786         
2787 #if USE(JSVALUE64)
2788     case Int52RepUse: {
2789         ASSERT(shouldCheckOverflow(node->arithMode()));
2790         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2791
2792         // Will we need an overflow check? If we can prove that neither input can be
2793         // Int52 then the overflow check will not be necessary.
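             // (If neither operand can be a full 52-bit value, both are known to fit
             // in 32 bits, and the sum of two 32-bit values needs at most 33 bits,
             // comfortably within 52.)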
2794         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2795             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2796             SpeculateWhicheverInt52Operand op1(this, node->child1());
2797             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2798             GPRTemporary result(this, Reuse, op1);
2799             m_jit.move(op1.gpr(), result.gpr());
2800             m_jit.add64(op2.gpr(), result.gpr());
2801             int52Result(result.gpr(), node, op1.format());
2802             return;
2803         }
2804         
2805         SpeculateInt52Operand op1(this, node->child1());
2806         SpeculateInt52Operand op2(this, node->child2());
2807         GPRTemporary result(this);
2808         m_jit.move(op1.gpr(), result.gpr());
2809         speculationCheck(
2810             Int52Overflow, JSValueRegs(), 0,
2811             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2812         int52Result(result.gpr(), node);
2813         return;
2814     }
2815 #endif // USE(JSVALUE64)
2816     
2817     case DoubleRepUse: {
2818         SpeculateDoubleOperand op1(this, node->child1());
2819         SpeculateDoubleOperand op2(this, node->child2());
2820         FPRTemporary result(this, op1, op2);
2821
2822         FPRReg reg1 = op1.fpr();
2823         FPRReg reg2 = op2.fpr();
2824         m_jit.addDouble(reg1, reg2, result.fpr());
2825
2826         doubleResult(result.fpr(), node);
2827         return;
2828     }
2829         
2830     default:
2831         RELEASE_ASSERT_NOT_REACHED();
2832         break;
2833     }
2834 }
2835
2836 void SpeculativeJIT::compileMakeRope(Node* node)
2837 {
2838     ASSERT(node->child1().useKind() == KnownStringUse);
2839     ASSERT(node->child2().useKind() == KnownStringUse);
2840     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2841     
2842     SpeculateCellOperand op1(this, node->child1());
2843     SpeculateCellOperand op2(this, node->child2());
2844     SpeculateCellOperand op3(this, node->child3());
2845     GPRTemporary result(this);
2846     GPRTemporary allocator(this);
2847     GPRTemporary scratch(this);
2848     
2849     GPRReg opGPRs[3];
2850     unsigned numOpGPRs;
2851     opGPRs[0] = op1.gpr();
2852     opGPRs[1] = op2.gpr();
2853     if (node->child3()) {
2854         opGPRs[2] = op3.gpr();
2855         numOpGPRs = 3;
2856     } else {
2857         opGPRs[2] = InvalidGPRReg;
2858         numOpGPRs = 2;
2859     }
2860     GPRReg resultGPR = result.gpr();
2861     GPRReg allocatorGPR = allocator.gpr();
2862     GPRReg scratchGPR = scratch.gpr();
2863     
2864     JITCompiler::JumpList slowPath;
2865     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
2866     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2867     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2868         
2869     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2870     for (unsigned i = 0; i < numOpGPRs; ++i)
2871         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2872     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2873         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
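         // The new rope is marked 8-bit only if every fiber is 8-bit (the fibers'
         // flags are ANDed together and then masked with Is8Bit), and its length is
         // the sum of the fibers' lengths, with a speculation check in case that sum
         // overflows int32.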
2874     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2875     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2876     if (!ASSERT_DISABLED) {
2877         JITCompiler::Jump ok = m_jit.branch32(
2878             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2879         m_jit.abortWithReason(DFGNegativeStringLength);
2880         ok.link(&m_jit);
2881     }
2882     for (unsigned i = 1; i < numOpGPRs; ++i) {
2883         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2884         speculationCheck(
2885             Uncountable, JSValueSource(), nullptr,
2886             m_jit.branchAdd32(
2887                 JITCompiler::Overflow,
2888                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2889     }
2890     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2891     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2892     if (!ASSERT_DISABLED) {
2893         JITCompiler::Jump ok = m_jit.branch32(
2894             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2895         m_jit.abortWithReason(DFGNegativeStringLength);
2896         ok.link(&m_jit);
2897     }
2898     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2899     
2900     switch (numOpGPRs) {
2901     case 2:
2902         addSlowPathGenerator(slowPathCall(
2903             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2904         break;
2905     case 3:
2906         addSlowPathGenerator(slowPathCall(
2907             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2908         break;
2909     default:
2910         RELEASE_ASSERT_NOT_REACHED();
2911         break;
2912     }
2913         
2914     cellResult(resultGPR, node);
2915 }
2916
2917 void SpeculativeJIT::compileArithClz32(Node* node)
2918 {
2919     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2920     SpeculateInt32Operand value(this, node->child1());
2921     GPRTemporary result(this, Reuse, value);
2922     GPRReg valueReg = value.gpr();
2923     GPRReg resultReg = result.gpr();
2924     m_jit.countLeadingZeros32(valueReg, resultReg);
2925     int32Result(resultReg, node);
2926 }
2927
2928 void SpeculativeJIT::compileArithSub(Node* node)
2929 {
2930     switch (node->binaryUseKind()) {
2931     case Int32Use: {
2932         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2933         
2934         if (node->child2()->isInt32Constant()) {
2935             SpeculateInt32Operand op1(this, node->child1());
2936             int32_t imm2 = node->child2()->asInt32();
2937             GPRTemporary result(this);
2938
2939             if (!shouldCheckOverflow(node->arithMode())) {
2940                 m_jit.move(op1.gpr(), result.gpr());
2941                 m_jit.sub32(Imm32(imm2), result.gpr());
2942             } else {
2943                 GPRTemporary scratch(this);
2944                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2945             }
2946
2947             int32Result(result.gpr(), node);
2948             return;
2949         }
2950             
2951         if (node->child1()->isInt32Constant()) {
2952             int32_t imm1 = node->child1()->asInt32();
2953             SpeculateInt32Operand op2(this, node->child2());
2954             GPRTemporary result(this);
2955                 
2956             m_jit.move(Imm32(imm1), result.gpr());
2957             if (!shouldCheckOverflow(node->arithMode()))
2958                 m_jit.sub32(op2.gpr(), result.gpr());
2959             else
2960                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2961                 
2962             int32Result(result.gpr(), node);
2963             return;
2964         }
2965             
2966         SpeculateInt32Operand op1(this, node->child1());
2967         SpeculateInt32Operand op2(this, node->child2());
2968         GPRTemporary result(this);
2969
2970         if (!shouldCheckOverflow(node->arithMode())) {
2971             m_jit.move(op1.gpr(), result.gpr());
2972             m_jit.sub32(op2.gpr(), result.gpr());
2973         } else
2974             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2975
2976         int32Result(result.gpr(), node);
2977         return;
2978     }
2979         
2980 #if USE(JSVALUE64)
2981     case Int52RepUse: {
2982         ASSERT(shouldCheckOverflow(node->arithMode()));
2983         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2984
2985         // Will we need an overflow check? If we can prove that neither input can be
2986         // Int52 then the overflow check will not be necessary.
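             // (As in compileAdd: two operands that provably fit in 32 bits cannot
             // produce a difference that overflows 52 bits.)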
2987         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2988             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2989             SpeculateWhicheverInt52Operand op1(this, node->child1());
2990             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2991             GPRTemporary result(this, Reuse, op1);
2992             m_jit.move(op1.gpr(), result.gpr());
2993             m_jit.sub64(op2.gpr(), result.gpr());
2994             int52Result(result.gpr(), node, op1.format());
2995             return;
2996         }
2997         
2998         SpeculateInt52Operand op1(this, node->child1());
2999         SpeculateInt52Operand op2(this, node->child2());
3000         GPRTemporary result(this);
3001         m_jit.move(op1.gpr(), result.gpr());
3002         speculationCheck(
3003             Int52Overflow, JSValueRegs(), 0,
3004             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3005         int52Result(result.gpr(), node);
3006         return;
3007     }
3008 #endif // USE(JSVALUE64)
3009
3010     case DoubleRepUse: {
3011         SpeculateDoubleOperand op1(this, node->child1());
3012         SpeculateDoubleOperand op2(this, node->child2());
3013         FPRTemporary result(this, op1);
3014
3015         FPRReg reg1 = op1.fpr();
3016         FPRReg reg2 = op2.fpr();
3017         m_jit.subDouble(reg1, reg2, result.fpr());
3018
3019         doubleResult(result.fpr(), node);
3020         return;
3021     }
3022         
3023     default:
3024         RELEASE_ASSERT_NOT_REACHED();
3025         return;
3026     }
3027 }
3028
3029 void SpeculativeJIT::compileArithNegate(Node* node)
3030 {
3031     switch (node->child1().useKind()) {
3032     case Int32Use: {
3033         SpeculateInt32Operand op1(this, node->child1());
3034         GPRTemporary result(this);
3035
3036         m_jit.move(op1.gpr(), result.gpr());
3037
3038         // Note: there is no notion of the result not being used as a number while
3039         // someone still cares about negative zero.
3040         
3041         if (!shouldCheckOverflow(node->arithMode()))
3042             m_jit.neg32(result.gpr());
3043         else if (!shouldCheckNegativeZero(node->arithMode()))
3044             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3045         else {
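                 // result & 0x7fffffff is zero only when result is 0 or -2^31;
                 // negating 0 would produce negative zero and negating -2^31 would
                 // overflow, so this single test covers both cases.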
3046             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3047             m_jit.neg32(result.gpr());
3048         }
3049
3050         int32Result(result.gpr(), node);
3051         return;
3052     }
3053
3054 #if USE(JSVALUE64)
3055     case Int52RepUse: {
3056         ASSERT(shouldCheckOverflow(node->arithMode()));
3057         
3058         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3059             SpeculateWhicheverInt52Operand op1(this, node->child1());
3060             GPRTemporary result(this);
3061             GPRReg op1GPR = op1.gpr();
3062             GPRReg resultGPR = result.gpr();
3063             m_jit.move(op1GPR, resultGPR);
3064             m_jit.neg64(resultGPR);
3065             if (shouldCheckNegativeZero(node->arithMode())) {
3066                 speculationCheck(
3067                     NegativeZero, JSValueRegs(), 0,
3068                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3069             }
3070             int52Result(resultGPR, node, op1.format());
3071             return;
3072         }
3073         
3074         SpeculateInt52Operand op1(this, node->child1());
3075         GPRTemporary result(this);
3076         GPRReg op1GPR = op1.gpr();
3077         GPRReg resultGPR = result.gpr();
3078         m_jit.move(op1GPR, resultGPR);
3079         speculationCheck(
3080             Int52Overflow, JSValueRegs(), 0,
3081             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3082         if (shouldCheckNegativeZero(node->arithMode())) {
3083             speculationCheck(
3084                 NegativeZero, JSValueRegs(), 0,
3085                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3086         }
3087         int52Result(resultGPR, node);
3088         return;
3089     }
3090 #endif // USE(JSVALUE64)
3091         
3092     case DoubleRepUse: {
3093         SpeculateDoubleOperand op1(this, node->child1());
3094         FPRTemporary result(this);
3095         
3096         m_jit.negateDouble(op1.fpr(), result.fpr());
3097         
3098         doubleResult(result.fpr(), node);
3099         return;
3100     }
3101         
3102     default:
3103         RELEASE_ASSERT_NOT_REACHED();
3104         return;
3105     }
3106 }
3107 void SpeculativeJIT::compileArithMul(Node* node)
3108 {
3109     switch (node->binaryUseKind()) {
3110     case Int32Use: {
3111         SpeculateInt32Operand op1(this, node->child1());
3112         SpeculateInt32Operand op2(this, node->child2());
3113         GPRTemporary result(this);
3114
3115         GPRReg reg1 = op1.gpr();
3116         GPRReg reg2 = op2.gpr();
3117
3118         // We can perform truncated multiplications if we get to this point, because if the
3119         // fixup phase could not prove that it would be safe, it would have turned us into
3120         // a double multiplication.
3121         if (!shouldCheckOverflow(node->arithMode())) {
3122             m_jit.move(reg1, result.gpr());
3123             m_jit.mul32(reg2, result.gpr());
3124         } else {
3125             speculationCheck(
3126                 Overflow, JSValueRegs(), 0,
3127                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3128         }
3129             
3130         // Check for negative zero, if the users of this node care about such things.
3131         if (shouldCheckNegativeZero(node->arithMode())) {
3132             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3133             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3134             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3135             resultNonZero.link(&m_jit);
3136         }
3137
3138         int32Result(result.gpr(), node);
3139         return;
3140     }
3141     
3142 #if USE(JSVALUE64)   
3143     case Int52RepUse: {
3144         ASSERT(shouldCheckOverflow(node->arithMode()));
3145         
3146         // This is super clever. We want to do an int52 multiplication and check the
3147         // int52 overflow bit. There is no direct hardware support for this, but we do
3148         // have the ability to do an int64 multiplication and check the int64 overflow
3149         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3150         // registers, with the high 12 bits being sign-extended. We can do:
3151         //
3152         //     (a * (b << 12))
3153         //
3154         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3155         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3156         // multiplication overflows is identical to whether the 'a * b' 52-bit
3157         // multiplication overflows.
3158         //
3159         // In our nomenclature, this is:
3160         //
3161         //     strictInt52(a) * int52(b) => int52
3162         //
3163         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3164         // bits.
3165         //
3166         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3167         // we just do whatever is more convenient for op1 and have op2 do the
3168         // opposite. This ensures that we do at most one shift.
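             //
             // Concretely: with a = 3 in strict (unshifted) form and b = 5 in shifted
             // form (5 << 12), the 64-bit product is (3 * 5) << 12, i.e. 15 already in
             // shifted form. If a * b needed more than 52 bits, the shifted product
             // would not fit in 64 bits and the hardware overflow flag would fire.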
3169
3170         SpeculateWhicheverInt52Operand op1(this, node->child1());
3171         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3172         GPRTemporary result(this);
3173         
3174         GPRReg op1GPR = op1.gpr();
3175         GPRReg op2GPR = op2.gpr();
3176         GPRReg resultGPR = result.gpr();
3177         
3178         m_jit.move(op1GPR, resultGPR);
3179         speculationCheck(
3180             Int52Overflow, JSValueRegs(), 0,
3181             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3182         
3183         if (shouldCheckNegativeZero(node->arithMode())) {
3184             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3185                 MacroAssembler::NonZero, resultGPR);
3186             speculationCheck(
3187                 NegativeZero, JSValueRegs(), 0,
3188                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3189             speculationCheck(
3190                 NegativeZero, JSValueRegs(), 0,
3191                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3192             resultNonZero.link(&m_jit);
3193         }
3194         
3195         int52Result(resultGPR, node);
3196         return;
3197     }
3198 #endif // USE(JSVALUE64)
3199         
3200     case DoubleRepUse: {
3201         SpeculateDoubleOperand op1(this, node->child1());
3202         SpeculateDoubleOperand op2(this, node->child2());
3203         FPRTemporary result(this, op1, op2);
3204         
3205         FPRReg reg1 = op1.fpr();
3206         FPRReg reg2 = op2.fpr();
3207         
3208         m_jit.mulDouble(reg1, reg2, result.fpr());
3209         
3210         doubleResult(result.fpr(), node);
3211         return;
3212     }
3213         
3214     default:
3215         RELEASE_ASSERT_NOT_REACHED();
3216         return;
3217     }
3218 }
3219
3220 void SpeculativeJIT::compileArithDiv(Node* node)
3221 {
3222     switch (node->binaryUseKind()) {
3223     case Int32Use: {
3224 #if CPU(X86) || CPU(X86_64)
3225         SpeculateInt32Operand op1(this, node->child1());
3226         SpeculateInt32Operand op2(this, node->child2());
3227         GPRTemporary eax(this, X86Registers::eax);
3228         GPRTemporary edx(this, X86Registers::edx);
3229         GPRReg op1GPR = op1.gpr();
3230         GPRReg op2GPR = op2.gpr();
3231     
3232         GPRReg op2TempGPR;
3233         GPRReg temp;
3234         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3235             op2TempGPR = allocate();
3236             temp = op2TempGPR;
3237         } else {
3238             op2TempGPR = InvalidGPRReg;
3239             if (op1GPR == X86Registers::eax)
3240                 temp = X86Registers::edx;
3241             else
3242                 temp = X86Registers::eax;
3243         }
3244     
3245         ASSERT(temp != op1GPR);
3246         ASSERT(temp != op2GPR);
3247     
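             // Denominators of 0 and -1 need special handling: idivl raises a hardware
             // fault when dividing by zero and when computing -2^31 / -1. An unsigned
             // comparison of denominator + 1 against 1 detects exactly those two
             // values, since 0 + 1 == 1 and -1 + 1 == 0.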
3248         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3249     
3250         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3251     
3252         JITCompiler::JumpList done;
3253         if (shouldCheckOverflow(node->arithMode())) {
3254             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3255             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3256         } else {
3257             // This is the case where we convert the result to an int after we're done, and we
3258             // already know that the denominator is either -1 or 0. So, if the denominator is
3259             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3260             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3261             // are happy to fall through to a normal division, since we're just dividing
3262             // something by negative 1.
3263         
3264             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3265             m_jit.move(TrustedImm32(0), eax.gpr());
3266             done.append(m_jit.jump());
3267         
3268             notZero.link(&m_jit);
3269             JITCompiler::Jump notNeg2ToThe31 =
3270                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3271             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3272             done.append(m_jit.jump());
3273         
3274             notNeg2ToThe31.link(&m_jit);
3275         }
3276     
3277         safeDenominator.link(&m_jit);
3278     
3279         // If the user cares about negative zero, then speculate that we're not about
3280         // to produce negative zero.
3281         if (shouldCheckNegativeZero(node->arithMode())) {
3282             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3283             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3284             numeratorNonZero.link(&m_jit);
3285         }
3286     
3287         if (op2TempGPR != InvalidGPRReg) {
3288             m_jit.move(op2GPR, op2TempGPR);
3289             op2GPR = op2TempGPR;
3290         }
3291             
3292         m_jit.move(op1GPR, eax.gpr());
3293         m_jit.assembler().cdq();
3294         m_jit.assembler().idivl_r(op2GPR);
3295             
3296         if (op2TempGPR != InvalidGPRReg)
3297             unlock(op2TempGPR);
3298
3299         // Check that there was no remainder. If there had been, then we'd be obligated to
3300         // produce a double result instead.
3301         if (shouldCheckOverflow(node->arithMode()))
3302             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3303         
3304         done.link(&m_jit);
3305         int32Result(eax.gpr(), node);
3306 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3307         SpeculateInt32Operand op1(this, node->child1());
3308         SpeculateInt32Operand op2(this, node->child2());
3309         GPRReg op1GPR = op1.gpr();
3310         GPRReg op2GPR = op2.gpr();
3311         GPRTemporary quotient(this);
3312         GPRTemporary multiplyAnswer(this);
3313
3314         // If the user cares about negative zero, then speculate that we're not about
3315         // to produce negative zero.
3316         if (shouldCheckNegativeZero(node->arithMode())) {
3317             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3318             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3319             numeratorNonZero.link(&m_jit);
3320         }
3321
3322         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3323
3324         // Check that there was no remainder. If there had been, then we'd be obligated to
3325         // produce a double result instead.
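             // For example, 7 / 2: sdiv yields 3, and 3 * 2 == 6 != 7, so the check
             // fails and we exit to code that can produce the exact result 3.5.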
3326         if (shouldCheckOverflow(node->arithMode())) {
3327             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3328             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3329         }
3330
3331         int32Result(quotient.gpr(), node);
3332 #else
3333         RELEASE_ASSERT_NOT_REACHED();
3334 #endif
3335         break;
3336     }
3337         
3338     case DoubleRepUse: {
3339         SpeculateDoubleOperand op1(this, node->child1());
3340         SpeculateDoubleOperand op2(this, node->child2());
3341         FPRTemporary result(this, op1);
3342         
3343         FPRReg reg1 = op1.fpr();
3344         FPRReg reg2 = op2.fpr();
3345         m_jit.divDouble(reg1, reg2, result.fpr());
3346         
3347         doubleResult(result.fpr(), node);
3348         break;
3349     }
3350         
3351     default:
3352         RELEASE_ASSERT_NOT_REACHED();
3353         break;
3354     }
3355 }
3356
3357 void SpeculativeJIT::compileArithMod(Node* node)
3358 {
3359     switch (node->binaryUseKind()) {
3360     case Int32Use: {
3361         // In the fast path, the dividend value could be the final result
3362         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3363         SpeculateStrictInt32Operand op1(this, node->child1());
3364         
3365         if (node->child2()->isInt32Constant()) {
3366             int32_t divisor = node->child2()->asInt32();
3367             if (divisor > 1 && hasOneBitSet(divisor)) {
3368                 unsigned logarithm = WTF::fastLog2(divisor);
3369                 GPRReg dividendGPR = op1.gpr();
3370                 GPRTemporary result(this);
3371                 GPRReg resultGPR = result.gpr();
3372
3373                 // This is what LLVM generates. It's pretty crazy. Here's my
3374                 // attempt at understanding it.
3375                 
3376                 // First, compute either divisor - 1, or 0, depending on whether
3377                 // the dividend is negative:
3378                 //
3379                 // If dividend < 0:  resultGPR = divisor - 1
3380                 // If dividend >= 0: resultGPR = 0
3381                 m_jit.move(dividendGPR, resultGPR);
3382                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3383                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3384                 
3385                 // Add in the dividend, so that:
3386                 //
3387                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3388                 // If dividend >= 0: resultGPR = dividend
3389                 m_jit.add32(dividendGPR, resultGPR);
3390                 
3391                 // Mask so as to only get the *high* bits. This rounds down
3392                 // (towards negative infinity) resultGPR to the nearest multiple
3393                 // of divisor, so that:
3394                 //
3395                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor) * divisor
3396                 // If dividend >= 0: resultGPR = floor(dividend / divisor) * divisor
3397                 //
3398                 // Note that this can be simplified to:
3399                 //
3400                 // If dividend < 0:  resultGPR = ceil(dividend / divisor) * divisor
3401                 // If dividend >= 0: resultGPR = floor(dividend / divisor) * divisor
3402                 //
3403                 // Note that if the dividend is negative, resultGPR will also be negative.
3404                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3405                 // zero, because of how things are conditionalized.
3406                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3407                 
3408                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3409                 //
3410                 // resultGPR = dividendGPR - resultGPR
3411                 m_jit.neg32(resultGPR);
3412                 m_jit.add32(dividendGPR, resultGPR);
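                     //
                     // Worked example with divisor = 8 (logarithm = 3) and dividend = -13:
                     // the shifts produce divisor - 1 = 7, adding the dividend gives -6,
                     // masking with -8 gives -8 (the multiple of 8 nearest -13 toward zero),
                     // and -13 - (-8) = -5, matching JavaScript's -13 % 8.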
3413                 
3414                 if (shouldCheckNegativeZero(node->arithMode())) {
3415                     // Check that we're not about to create negative zero.
3416                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3417                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3418                     numeratorPositive.link(&m_jit);
3419                 }
3420
3421                 int32Result(resultGPR, node);
3422                 return;
3423             }
3424         }
3425         
3426 #if CPU(X86) || CPU(X86_64)
3427         if (node->child2()->isInt32Constant()) {
3428             int32_t divisor = node->child2()->asInt32();
3429             if (divisor && divisor != -1) {
3430                 GPRReg op1Gpr = op1.gpr();
3431
3432                 GPRTemporary eax(this, X86Registers::eax);
3433                 GPRTemporary edx(this, X86Registers::edx);
3434                 GPRTemporary scratch(this);
3435                 GPRReg scratchGPR = scratch.gpr();
3436
3437                 GPRReg op1SaveGPR;
3438                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3439                     op1SaveGPR = allocate();
3440                     ASSERT(op1Gpr != op1SaveGPR);
3441                     m_jit.move(op1Gpr, op1SaveGPR);
3442                 } else
3443                     op1SaveGPR = op1Gpr;
3444                 ASSERT(op1SaveGPR != X86Registers::eax);
3445                 ASSERT(op1SaveGPR != X86Registers::edx);
3446
3447                 m_jit.move(op1Gpr, eax.gpr());
3448                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3449                 m_jit.assembler().cdq();
3450                 m_jit.assembler().idivl_r(scratchGPR);
3451                 if (shouldCheckNegativeZero(node->arithMode())) {
3452                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3453                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3454                     numeratorPositive.link(&m_jit);
3455                 }
3456             
3457                 if (op1SaveGPR != op1Gpr)
3458                     unlock(op1SaveGPR);
3459
3460                 int32Result(edx.gpr(), node);
3461                 return;
3462             }
3463         }
3464 #endif
3465
3466         SpeculateInt32Operand op2(this, node->child2());
3467 #if CPU(X86) || CPU(X86_64)
3468         GPRTemporary eax(this, X86Registers::eax);
3469         GPRTemporary edx(this, X86Registers::edx);
3470         GPRReg op1GPR = op1.gpr();
3471         GPRReg op2GPR = op2.gpr();
3472     
3473         GPRReg op2TempGPR;
3474         GPRReg temp;
3475         GPRReg op1SaveGPR;
3476     
3477         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3478             op2TempGPR = allocate();
3479             temp = op2TempGPR;
3480         } else {
3481             op2TempGPR = InvalidGPRReg;
3482             if (op1GPR == X86Registers::eax)
3483                 temp = X86Registers::edx;
3484             else
3485                 temp = X86Registers::eax;
3486         }
3487     
3488         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3489             op1SaveGPR = allocate();
3490             ASSERT(op1GPR != op1SaveGPR);
3491             m_jit.move(op1GPR, op1SaveGPR);
3492         } else
3493             op1SaveGPR = op1GPR;
3494     
3495         ASSERT(temp != op1GPR);
3496         ASSERT(temp != op2GPR);
3497         ASSERT(op1SaveGPR != X86Registers::eax);
3498         ASSERT(op1SaveGPR != X86Registers::edx);
3499     
3500         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3501     
3502         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3503     
3504         JITCompiler::JumpList done;
3505         
3506         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3507         // separate case for that. But it probably doesn't matter so much.
3508         if (shouldCheckOverflow(node->arithMode())) {
3509             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3510             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3511         } else {
3512             // This is the case where we convert the result to an int after we're done, and we
3513             // already know that the denominator is either -1 or 0. So, if the denominator is
3514             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3515             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3516             // happy to fall through to a normal division, since we're just dividing something
3517             // by negative 1.
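                 // (As in compileArithDiv, these denominators must be peeled off before
                 // idivl runs, because idivl faults on a zero divisor and on -2^31 / -1.)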
3518         
3519             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3520             m_jit.move(TrustedImm32(0), edx.gpr());
3521             done.append(m_jit.jump());
3522         
3523             notZero.link(&m_jit);
3524             JITCompiler::Jump notNeg2ToThe31 =
3525                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3526             m_jit.move(TrustedImm32(0), edx.gpr());
3527             done.append(m_jit.jump());
3528         
3529             notNeg2ToThe31.link(&m_jit);
3530         }
3531         
3532         safeDenominator.link(&m_jit);
3533             
3534         if (op2TempGPR != InvalidGPRReg) {
3535             m_jit.move(op2GPR, op2TempGPR);
3536             op2GPR = op2TempGPR;
3537         }
3538             
3539         m_jit.move(op1GPR, eax.gpr());
3540         m_jit.assembler().cdq();
3541         m_jit.assembler().idivl_r(op2GPR);
3542             
3543         if (op2TempGPR != InvalidGPRReg)
3544             unlock(op2TempGPR);
3545
3546         // Check that we're not about to create negative zero.
3547         if (shouldCheckNegativeZero(node->arithMode())) {
3548             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3549             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3550             numeratorPositive.link(&m_jit);
3551         }
3552     
3553         if (op1SaveGPR != op1GPR)
3554             unlock(op1SaveGPR);
3555             
3556         done.link(&m_jit);
3557         int32Result(edx.gpr(), node);
3558
3559 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3560         GPRTemporary temp(this);
3561         GPRTemporary quotientThenRemainder(this);
3562         GPRTemporary multiplyAnswer(this);
3563         GPRReg dividendGPR = op1.gpr();
3564         GPRReg divisorGPR = op2.gpr();
3565         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3566         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3567
3568         JITCompiler::JumpList done;
3569     
3570         if (shouldCheckOverflow(node->arithMode()))
3571             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3572         else {
3573             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3574             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3575             done.append(m_jit.jump());
3576             denominatorNotZero.link(&m_jit);
3577         }
3578
3579         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3580         // FIXME: It seems like there are cases where we don't need this? What if we have
3581         // arithMode() == Arith::Unchecked?
3582         // https://bugs.webkit.org/show_bug.cgi?id=126444
3583         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3584 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3585         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3586 #else
3587         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3588 #endif
3589
3590         // If the user cares about negative zero, then speculate that we're not about
3591         // to produce negative zero.
3592         if (shouldCheckNegativeZero(node->arithMode())) {
3593             // Check that we're not about to create negative zero.
3594             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3595             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3596             numeratorPositive.link(&m_jit);
3597         }
3598
3599         done.link(&m_jit);
3600
3601         int32Result(quotientThenRemainderGPR, node);
3602 #else // not architecture that can do integer division
3603         RELEASE_ASSERT_NOT_REACHED();
3604 #endif
3605         return;
3606     }
3607         
3608     case DoubleRepUse: {
3609         SpeculateDoubleOperand op1(this, node->child1());
3610         SpeculateDoubleOperand op2(this, node->child2());
3611         
3612         FPRReg op1FPR = op1.fpr();
3613         FPRReg op2FPR = op2.fpr();
3614         
3615         flushRegisters();
3616         
3617         FPRResult result(this);
3618         
3619         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3620         
3621         doubleResult(result.fpr(), node);
3622         return;
3623     }
3624         
3625     default:
3626         RELEASE_ASSERT_NOT_REACHED();
3627         return;
3628     }
3629 }
3630
3631 void SpeculativeJIT::compileArithRound(Node* node)
3632 {
3633     ASSERT(node->child1().useKind() == DoubleRepUse);
3634
3635     SpeculateDoubleOperand value(this, node->child1());
3636     FPRReg valueFPR = value.fpr();
3637
3638     if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
3639         FPRTemporary oneHalf(this);
3640         GPRTemporary roundedResultAsInt32(this);
3641         FPRReg oneHalfFPR = oneHalf.fpr();
3642         GPRReg resultGPR = roundedResultAsInt32.gpr();
3643
3644         static const double halfConstant = 0.5;
3645         m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
3646         m_jit.addDouble(valueFPR, oneHalfFPR);
3647
3648         JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
3649         speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
3650         int32Result(resultGPR, node);
3651         return;
3652     }
3653
3654     flushRegisters();
3655     FPRResult roundedResultAsDouble(this);
3656     FPRReg resultFPR = roundedResultAsDouble.fpr();
3657     callOperation(jsRound, resultFPR, valueFPR);
3658     if (producesInteger(node->arithRoundingMode())) {
3659         GPRTemporary roundedResultAsInt32(this);
3660         FPRTemporary scratch(this);
3661         FPRReg scratchFPR = scratch.fpr();
3662         GPRReg resultGPR = roundedResultAsInt32.gpr();
3663         JITCompiler::JumpList failureCases;
3664         m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
3665         speculationCheck(Overflow, JSValueRegs(), node, failureCases);
3666
3667         int32Result(resultGPR, node);
3668     } else
3669         doubleResult(resultFPR, node);
3670 }
3671
3672 void SpeculativeJIT::compileArithSqrt(Node* node)
3673 {
3674     SpeculateDoubleOperand op1(this, node->child1());
3675     FPRReg op1FPR = op1.fpr();
3676
3677     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3678         flushRegisters();
3679         FPRResult result(this);
3680         callOperation(sqrt, result.fpr(), op1FPR);
3681         doubleResult(result.fpr(), node);
3682     } else {
3683         FPRTemporary result(this, op1);
3684         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3685         doubleResult(result.fpr(), node);
3686     }
3687 }
3688
3689 // For small positive integer exponents, it is worth doing a tiny inline loop to exponentiate the base.
3690 // Every register is clobbered by this helper.
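     // The loop below is square-and-multiply: each iteration squares the base and
     // halves the exponent, multiplying the running result by the base whenever the
     // exponent's low bit is set. For example, 3^5 (5 = 0b101): the result becomes 3,
     // the base becomes 9 and then 81, and the result becomes 3 * 81 = 243.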
3691 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3692 {
3693     MacroAssembler::JumpList skipFastPath;
3694     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3695     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3696
3697     static const double oneConstant = 1.0;
3698     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3699
3700     MacroAssembler::Label startLoop(assembler.label());
3701     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3702     assembler.mulDouble(xOperand, result);
3703     exponentIsEven.link(&assembler);
3704     assembler.mulDouble(xOperand, xOperand);
3705     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3706     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3707
3708     MacroAssembler::Jump skipSlowPath = assembler.jump();
3709     skipFastPath.link(&assembler);
3710
3711     return skipSlowPath;
3712 }
3713
3714 void SpeculativeJIT::compileArithPow(Node* node)
3715 {
3716     if (node->child2().useKind() == Int32Use) {
3717         SpeculateDoubleOperand xOperand(this, node->child1());
3718         SpeculateInt32Operand yOperand(this, node->child2());
3719         FPRReg xOperandfpr = xOperand.fpr();
3720         GPRReg yOperandGpr = yOperand.gpr();
3721         FPRTemporary yOperandfpr(this);
3722
3723         flushRegisters();
3724
3725         FPRResult result(this);
3726         FPRReg resultFpr = result.fpr();
3727
3728         FPRTemporary xOperandCopy(this);
3729         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3730         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3731
3732         GPRTemporary counter(this);
3733         GPRReg counterGpr = counter.gpr();
3734         m_jit.move(yOperandGpr, counterGpr);
3735
3736         MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
3737         m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
3738         callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());
3739
3740         skipFallback.link(&m_jit);
3741         doubleResult(resultFpr, node);
3742         return;
3743     }
3744
3745     SpeculateDoubleOperand xOperand(this, node->child1());
3746     SpeculateDoubleOperand yOperand(this, node->child2());
3747     FPRReg xOperandfpr = xOperand.fpr();
3748     FPRReg yOperandfpr = yOperand.fpr();
3749
3750     flushRegisters();
3751
3752     FPRResult result(this);
3753     FPRReg resultFpr = result.fpr();
3754
3755     FPRTemporary xOperandCopy(this);
3756     FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3757
3758     FPRTemporary scratch(this);
3759     FPRReg scratchFpr = scratch.fpr();
3760
3761     GPRTemporary yOperandInteger(this);
3762     GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
3763     MacroAssembler::JumpList failedExponentConversionToInteger;
3764     m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);
3765
3766     m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3767     MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
3768     failedExponentConversionToInteger.link(&m_jit);
3769
3770     callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
3771     skipFallback.link(&m_jit);
3772     doubleResult(resultFpr, node);
3773 }
3774
3775 void SpeculativeJIT::compileArithLog(Node* node)
3776 {
3777     SpeculateDoubleOperand op1(this, node->child1());
3778     FPRReg op1FPR = op1.fpr();
3779     flushRegisters();
3780     FPRResult result(this);
3781     callOperation(log, result.fpr(), op1FPR);
3782     doubleResult(result.fpr(), node);
3783 }
3784
3785 // Returns true if the compare is fused with a subsequent branch.
3786 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3787 {
3788     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3789         return true;
3790
3791     if (node->isBinaryUseKind(Int32Use)) {
3792         compileInt32Compare(node, condition);
3793         return false;
3794     }
3795     
3796 #if USE(JSVALUE64)
3797     if (node->isBinaryUseKind(Int52RepUse)) {
3798         compileInt52Compare(node, condition);