Make sure range based iteration of Vector<> still receives bounds checking
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
44
45 namespace JSC { namespace DFG {
46
// Constructs the speculative (optimizing) code generator for one DFG
// compilation. All per-compilation state is derived from the JITCompiler:
// the generation-info table is sized to the frame's register count, and the
// variable event stream / minified graph pointers alias the JITCode object
// so OSR exit can later reconstruct baseline state.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}
61
// Out-of-line destructor; no explicit cleanup is needed — members release
// their own resources.
SpeculativeJIT::~SpeculativeJIT()
{
}
65
// Emits inline fast-path code that allocates a JSArray with the given
// structure and 'numElements' elements into resultGPR, with its butterfly
// (element storage) in storageGPR. Allocation failures branch to a slow path
// that calls operationNewArrayWithSize.
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    // Allocate at least BASE_VECTOR_LEN slots so small arrays have headroom.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    // Allocate the butterfly (vector + indexing header), then rewind storageGPR
    // by the vector size so it points at the header/elements boundary.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
        // Double arrays use PNaN as the hole marker: fill the unused tail of
        // the vector so it reads as holes rather than garbage.
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            // 32-bit: store the encoded NaN as separate tag/payload halves.
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements));
}
109
// Emits inline code that allocates and initializes an Arguments object for the
// current call frame into resultGPR. Allocation failures branch to 'slowPath'.
// Clobbers both scratch registers.
void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
    Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();

    // Size = fixed header + one JSValue slot per argument (ArgumentCount from
    // the call frame includes 'this').
    m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
    m_jit.mul32(TrustedImm32(sizeof(JSValue)), scratchGPR1, scratchGPR1);
    m_jit.add32(TrustedImm32(Arguments::offsetOfInlineRegisterArray()), scratchGPR1);
    emitAllocateVariableSizedJSObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR1, scratchGPR2, slowPath);

    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));

    // numArguments = ArgumentCount - 1 ('this' is not a named argument).
    m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
    m_jit.sub32(TrustedImm32(1), scratchGPR1);
    m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));

    m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
    if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
        m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));

    // Point the object at the live register file; no slow-argument data yet.
    m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));

    m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
    m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));

}
136
137 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
138 {
139     if (!m_compileOkay)
140         return;
141     ASSERT(m_isCheckingArgumentTypes || m_canExit);
142     m_jit.appendExitInfo(jumpToFail);
143     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
144 }
145
146 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
147 {
148     if (!m_compileOkay)
149         return;
150     ASSERT(m_isCheckingArgumentTypes || m_canExit);
151     m_jit.appendExitInfo(jumpsToFail);
152     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
153 }
154
155 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
156 {
157     if (!m_compileOkay)
158         return OSRExitJumpPlaceholder();
159     ASSERT(m_isCheckingArgumentTypes || m_canExit);
160     unsigned index = m_jit.jitCode()->osrExit.size();
161     m_jit.appendExitInfo();
162     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
163     return OSRExitJumpPlaceholder(index);
164 }
165
// Edge-based convenience overload: forwards to the Node* placeholder variant.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}
171
// Edge-based convenience overload: forwards to the Node* single-jump variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}
177
// Edge-based convenience overload: forwards to the Node* jump-list variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}
183
184 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
185 {
186     if (!m_compileOkay)
187         return;
188     ASSERT(m_isCheckingArgumentTypes || m_canExit);
189     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
190     m_jit.appendExitInfo(jumpToFail);
191     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
192 }
193
// Edge-based convenience overload: forwards to the Node* + recovery variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}
199
// Emits an invalidation point: an OSR exit with no failure jump whose
// replacement source is a watchpoint label, so the code here can later be
// overwritten with an unconditional exit when a watchpoint fires.
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    // Empty jump list: this exit is only ever taken via patching.
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
214
215 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
216 {
217     ASSERT(m_isCheckingArgumentTypes || m_canExit);
218     if (!m_compileOkay)
219         return;
220     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
221     m_compileOkay = false;
222     if (verboseCompilationEnabled())
223         dataLog("Bailing compilation.\n");
224 }
225
// Edge-based convenience overload: forwards to the Node* variant.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}
231
// Narrows the abstract value of 'edge' to 'typesPassedThrough' (the types the
// dynamic check lets pass) and records a BadType OSR exit taken via
// 'jumpToFail' when the check fails at run time.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}
238
239 RegisterSet SpeculativeJIT::usedRegisters()
240 {
241     RegisterSet result;
242     
243     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
244         GPRReg gpr = GPRInfo::toRegister(i);
245         if (m_gprs.isInUse(gpr))
246             result.set(gpr);
247     }
248     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
249         FPRReg fpr = FPRInfo::toRegister(i);
250         if (m_fprs.isInUse(fpr))
251             result.set(fpr);
252     }
253     
254     result.merge(RegisterSet::specialRegisters());
255     
256     return result;
257 }
258
// Takes ownership of a slow-path generator; its code is emitted later by
// runSlowPathGenerators(), after the fast paths.
void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}
263
264 void SpeculativeJIT::runSlowPathGenerators()
265 {
266     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
267         m_slowPathGenerators[i]->generate(this);
268 }
269
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
280
281 void SpeculativeJIT::clearGenerationInfo()
282 {
283     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
284         m_generationInfo[i] = GenerationInfo();
285     m_gprs = RegisterBank<GPRInfo>();
286     m_fprs = RegisterBank<FPRInfo>();
287 }
288
// Computes how to "silently" save and later restore the value held in GPR
// 'source' for virtual register 'spillMe' — i.e. without perturbing the
// allocator's bookkeeping. The spill action depends on whether the value is
// dirty; the fill action depends on the register format and, for constants,
// rematerializes the value instead of reloading it from the stack.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble); // Doubles go through silentSavePlanForFPR.
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill; // Value already backed by the stack or a constant.
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            // A JSValue occupies a tag/payload register pair; spill whichever
            // half 'source' is.
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // On 64-bit, booleans are never held in DataFormatBoolean registers.
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        // Int52 is stored left-shifted by int52ShiftAmount; the fill may need
        // to convert from the strict (unshifted) spill format.
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
438     
// FPR counterpart of silentSavePlanForGPR: computes how to save/restore the
// double value held in FPR 'source' for virtual register 'spillMe'.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
476     
// Emits the spill half of a silent save plan: stores the planned register to
// the stack slot of the plan's node, or does nothing if no spill is needed.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
503     
// Emits the fill half of a silent save plan: rematerializes a constant or
// reloads the spilled value back into the planned register. 'canTrample' is a
// scratch GPR that some 64-bit fills may clobber (e.g. SetDoubleConstant).
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample); // Only the 64-bit double-constant path needs it.
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        // Int52 values live left-shifted by int52ShiftAmount.
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Materialize the double's bit pattern in a GPR, then move it to the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Reload a raw int32 and re-box it as a JSValue by or-ing in the tag.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as shifted Int52; convert to strict (unshifted) form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as strict Int52; convert to shifted form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
602     
// Given the indexing type byte in tempGPR (clobbered), returns a jump taken
// when the object does NOT match the required array class + indexing shape.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // Original-structure arrays are handled elsewhere; reaching here is a bug.
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }
        
    case Array::Array:
        // Must be a JSArray with the required shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        // Must have the shape and NOT be a JSArray (IsArray bit must be clear).
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        // Only the shape matters; the IsArray bit is ignored.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}
633
// Jump-list variant: given the indexing type byte in tempGPR (clobbered),
// returns the jumps taken when the object does not match 'arrayMode'.
// ArrayStorage/SlowPutArrayStorage need a range check (two adjacent shapes),
// hence the dedicated handling instead of the single-shape helper.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Must be a JSArray with shape in [ArrayStorageShape,
                // SlowPutArrayStorageShape]; check via subtract + unsigned compare.
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Must be a JSArray with exactly ArrayStorageShape.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Non-array (or possibly-array) case: only the shape is checked.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}
690
// Compiles a CheckArray node: verifies at run time that the base object's
// indexing type / cell type matches the node's array mode, OSR-exiting on
// mismatch. No check is emitted if abstract interpretation already proves it.
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Check the indexing-type byte against the wanted shape(s).
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);

        noResult(m_currentNode);
        return;
    default:
        // Typed arrays: check the cell type matches the typed-array kind.
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }
    
    // NOTE(review): every non-crashing case above returns, so this classInfo
    // check path is only reachable from Array::String after the release
    // assert — and expectedClassInfo is never set to non-null here, so this
    // assert would fire if reached. Looks like vestigial code; confirm.
    RELEASE_ASSERT(expectedClassInfo);
    
    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}
752
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    // Emits code that converts the object in baseReg to the storage shape
    // demanded by node->arrayMode(), taking a slow path call when the
    // fast-path shape check fails.
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    // ArrayifyToStructure doesn't need a scratch register for the structure;
    // every other flavor adopts one here so the slow path generator can use it.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        // Fast path: the cell already has exactly the expected structure.
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        // Fast path: the cell's indexing type already satisfies the
        // requested array mode.
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    // The slow path performs the actual conversion.
    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
    
    noResult(m_currentNode);
}
788
789 void SpeculativeJIT::arrayify(Node* node)
790 {
791     ASSERT(node->arrayMode().isSpecific());
792     
793     SpeculateCellOperand base(this, node->child1());
794     
795     if (!node->child2()) {
796         arrayify(node, base.gpr(), InvalidGPRReg);
797         return;
798     }
799     
800     SpeculateInt32Operand property(this, node->child2());
801     
802     arrayify(node, base.gpr(), property.gpr());
803 }
804
805 GPRReg SpeculativeJIT::fillStorage(Edge edge)
806 {
807     VirtualRegister virtualRegister = edge->virtualRegister();
808     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
809     
810     switch (info.registerFormat()) {
811     case DataFormatNone: {
812         if (info.spillFormat() == DataFormatStorage) {
813             GPRReg gpr = allocate();
814             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
815             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
816             info.fillStorage(*m_stream, gpr);
817             return gpr;
818         }
819         
820         // Must be a cell; fill it as a cell and then return the pointer.
821         return fillSpeculateCell(edge);
822     }
823         
824     case DataFormatStorage: {
825         GPRReg gpr = info.gpr();
826         m_gprs.lock(gpr);
827         return gpr;
828     }
829         
830     default:
831         return fillSpeculateCell(edge);
832     }
833 }
834
835 void SpeculativeJIT::useChildren(Node* node)
836 {
837     if (node->flags() & NodeHasVarArgs) {
838         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
839             if (!!m_jit.graph().m_varArgChildren[childIdx])
840                 use(m_jit.graph().m_varArgChildren[childIdx]);
841         }
842     } else {
843         Edge child1 = node->child1();
844         if (!child1) {
845             ASSERT(!node->child2() && !node->child3());
846             return;
847         }
848         use(child1);
849         
850         Edge child2 = node->child2();
851         if (!child2) {
852             ASSERT(!node->child3());
853             return;
854         }
855         use(child2);
856         
857         Edge child3 = node->child3();
858         if (!child3)
859             return;
860         use(child3);
861     }
862 }
863
void SpeculativeJIT::compileIn(Node* node)
{
    // Compiles the 'in' operator. When the property is a constant atomic
    // string we emit a patchable inline cache; otherwise we call the generic
    // slow path operation.
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();
    
    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
            
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            // The property child is a constant: mark it used explicitly.
            use(node->child1());
            
            // Inline cache shape: a patchable jump (initially to the slow
            // path) followed by the 'done' label the stub jumps back to.
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();
            
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                string->tryGetValueImpl());
            
            // Record the register assignment so the IC can be repatched later.
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    // Generic case: flush everything and call the runtime.
    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();
        
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
        
    base.use();
    key.use();
        
    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
917
918 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
919 {
920     unsigned branchIndexInBlock = detectPeepHoleBranch();
921     if (branchIndexInBlock != UINT_MAX) {
922         Node* branchNode = m_block->at(branchIndexInBlock);
923
924         ASSERT(node->adjustedRefCount() == 1);
925         
926         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
927     
928         m_indexInBlock = branchIndexInBlock;
929         m_currentNode = branchNode;
930         
931         return true;
932     }
933     
934     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
935     
936     return false;
937 }
938
939 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
940 {
941     unsigned branchIndexInBlock = detectPeepHoleBranch();
942     if (branchIndexInBlock != UINT_MAX) {
943         Node* branchNode = m_block->at(branchIndexInBlock);
944
945         ASSERT(node->adjustedRefCount() == 1);
946         
947         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
948     
949         m_indexInBlock = branchIndexInBlock;
950         m_currentNode = branchNode;
951         
952         return true;
953     }
954     
955     nonSpeculativeNonPeepholeStrictEq(node, invert);
956     
957     return false;
958 }
959
960 static const char* dataFormatString(DataFormat format)
961 {
962     // These values correspond to the DataFormat enum.
963     const char* strings[] = {
964         "[  ]",
965         "[ i]",
966         "[ d]",
967         "[ c]",
968         "Err!",
969         "Err!",
970         "Err!",
971         "Err!",
972         "[J ]",
973         "[Ji]",
974         "[Jd]",
975         "[Jc]",
976         "Err!",
977         "Err!",
978         "Err!",
979         "Err!",
980     };
981     return strings[format];
982 }
983
void SpeculativeJIT::dump(const char* label)
{
    // Debug dump of the register allocator state: GPR and FPR banks plus the
    // generation info for every virtual register. Optionally wrapped in
    // <label>...</label> tags.
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Print register format and spill format side by side; dead entries
        // get placeholder brackets.
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            // On 32-bit, boxed JSValue formats occupy a register pair, so
            // there is no single GPR name to print.
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1015
1016 GPRTemporary::GPRTemporary()
1017     : m_jit(0)
1018     , m_gpr(InvalidGPRReg)
1019 {
1020 }
1021
1022 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1023     : m_jit(jit)
1024     , m_gpr(InvalidGPRReg)
1025 {
1026     m_gpr = m_jit->allocate();
1027 }
1028
1029 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1030     : m_jit(jit)
1031     , m_gpr(InvalidGPRReg)
1032 {
1033     m_gpr = m_jit->allocate(specific);
1034 }
1035
1036 #if USE(JSVALUE32_64)
1037 GPRTemporary::GPRTemporary(
1038     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1039     : m_jit(jit)
1040     , m_gpr(InvalidGPRReg)
1041 {
1042     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1043         m_gpr = m_jit->reuse(op1.gpr(which));
1044     else
1045         m_gpr = m_jit->allocate();
1046 }
1047 #endif // USE(JSVALUE32_64)
1048
1049 JSValueRegsTemporary::JSValueRegsTemporary() { }
1050
// Allocates registers covering a full JSValue: one GPR on 64-bit, a
// payload/tag GPR pair on 32-bit.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1060
1061 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1062
// Returns the register set holding the JSValue: a single GPR on 64-bit, a
// (tag, payload) pair on 32-bit.
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1071
1072 void GPRTemporary::adopt(GPRTemporary& other)
1073 {
1074     ASSERT(!m_jit);
1075     ASSERT(m_gpr == InvalidGPRReg);
1076     ASSERT(other.m_jit);
1077     ASSERT(other.m_gpr != InvalidGPRReg);
1078     m_jit = other.m_jit;
1079     m_gpr = other.m_gpr;
1080     other.m_jit = 0;
1081     other.m_gpr = InvalidGPRReg;
1082 }
1083
1084 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1085     : m_jit(jit)
1086     , m_fpr(InvalidFPRReg)
1087 {
1088     m_fpr = m_jit->fprAllocate();
1089 }
1090
1091 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1092     : m_jit(jit)
1093     , m_fpr(InvalidFPRReg)
1094 {
1095     if (m_jit->canReuse(op1.node()))
1096         m_fpr = m_jit->reuse(op1.fpr());
1097     else
1098         m_fpr = m_jit->fprAllocate();
1099 }
1100
1101 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1102     : m_jit(jit)
1103     , m_fpr(InvalidFPRReg)
1104 {
1105     if (m_jit->canReuse(op1.node()))
1106         m_fpr = m_jit->reuse(op1.fpr());
1107     else if (m_jit->canReuse(op2.node()))
1108         m_fpr = m_jit->reuse(op2.fpr());
1109     else
1110         m_fpr = m_jit->fprAllocate();
1111 }
1112
1113 #if USE(JSVALUE32_64)
1114 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1115     : m_jit(jit)
1116     , m_fpr(InvalidFPRReg)
1117 {
1118     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1119         m_fpr = m_jit->reuse(op1.fpr());
1120     else
1121         m_fpr = m_jit->fprAllocate();
1122 }
1123 #endif
1124
1125 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1126 {
1127     BasicBlock* taken = branchNode->branchData()->taken.block;
1128     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1129     
1130     SpeculateDoubleOperand op1(this, node->child1());
1131     SpeculateDoubleOperand op2(this, node->child2());
1132     
1133     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1134     jump(notTaken);
1135 }
1136
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    // Fused object-equality compare-and-branch. Both children are
    // speculated to be objects; failing that speculation triggers an OSR
    // exit via speculationCheck().
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
    
    // If taken is the fall-through block, invert the condition and swap the
    // targets so the common case falls through.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Watchpoint valid: no object can masquerade as undefined, so it
        // suffices to rule out strings. Only emit each check if abstract
        // interpretation hasn't already proven the child is an object.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal, 
                    MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()), 
                    m_jit.vm()->stringStructure.get()));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal, 
                    MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()), 
                    m_jit.vm()->stringStructure.get()));
        }
    } else {
        // Watchpoint fired: additionally verify neither operand has the
        // MasqueradesAsUndefined type-info flag set.
        GPRTemporary structure(this);
        GPRTemporary temp(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal, 
                    structureGPR, 
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal, 
                    structureGPR, 
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Objects are strictly equal iff they are the same cell pointer.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1213
1214 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1215 {
1216     BasicBlock* taken = branchNode->branchData()->taken.block;
1217     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1218
1219     // The branch instruction will branch to the taken block.
1220     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1221     if (taken == nextBlock()) {
1222         condition = JITCompiler::invert(condition);
1223         BasicBlock* tmp = taken;
1224         taken = notTaken;
1225         notTaken = tmp;
1226     }
1227
1228     if (node->child1()->isBooleanConstant()) {
1229         bool imm = node->child1()->asBoolean();
1230         SpeculateBooleanOperand op2(this, node->child2());
1231         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1232     } else if (node->child2()->isBooleanConstant()) {
1233         SpeculateBooleanOperand op1(this, node->child1());
1234         bool imm = node->child2()->asBoolean();
1235         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1236     } else {
1237         SpeculateBooleanOperand op1(this, node->child1());
1238         SpeculateBooleanOperand op2(this, node->child2());
1239         branch32(condition, op1.gpr(), op2.gpr(), taken);
1240     }
1241
1242     jump(notTaken);
1243 }
1244
1245 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1246 {
1247     BasicBlock* taken = branchNode->branchData()->taken.block;
1248     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1249
1250     // The branch instruction will branch to the taken block.
1251     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1252     if (taken == nextBlock()) {
1253         condition = JITCompiler::invert(condition);
1254         BasicBlock* tmp = taken;
1255         taken = notTaken;
1256         notTaken = tmp;
1257     }
1258
1259     if (node->child1()->isInt32Constant()) {
1260         int32_t imm = node->child1()->asInt32();
1261         SpeculateInt32Operand op2(this, node->child2());
1262         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1263     } else if (node->child2()->isInt32Constant()) {
1264         SpeculateInt32Operand op1(this, node->child1());
1265         int32_t imm = node->child2()->asInt32();
1266         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1267     } else {
1268         SpeculateInt32Operand op1(this, node->child1());
1269         SpeculateInt32Operand op2(this, node->child2());
1270         branch32(condition, op1.gpr(), op2.gpr(), taken);
1271     }
1272
1273     jump(notTaken);
1274 }
1275
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare. 
        ASSERT(node->adjustedRefCount() == 1);

        // Dispatch on the use kinds of the compare's children to the
        // appropriate specialized peephole codegen.
        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Generic fallback; it handles its own child uses and
                // block-cursor bookkeeping.
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The specialized paths above don't call use() themselves, so
        // record the child uses here and skip past the consumed branch.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1326
1327 void SpeculativeJIT::noticeOSRBirth(Node* node)
1328 {
1329     if (!node->hasVirtualRegister())
1330         return;
1331     
1332     VirtualRegister virtualRegister = node->virtualRegister();
1333     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1334     
1335     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1336 }
1337
1338 void SpeculativeJIT::compileMovHint(Node* node)
1339 {
1340     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1341     
1342     Node* child = node->child1().node();
1343     noticeOSRBirth(child);
1344     
1345     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1346 }
1347
void SpeculativeJIT::bail(AbortReason reason)
{
    // Abandons code generation for the current block by planting a crash
    // (abortWithReason) carrying 'reason' and the op of the last node we
    // generated, then discarding all register-allocation state.
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    // NOTE(review): m_compileOkay is reset to true here, presumably so the
    // remaining (unreachable, post-abort) codegen proceeds without
    // re-triggering bail — confirm against callers.
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
1356
void SpeculativeJIT::compileCurrentBlock()
{
    // Generates machine code for m_block: records the block's entry label,
    // replays the abstract interpreter alongside codegen, and emits each
    // node in order (or just bookkeeping for nodes that don't generate).
    ASSERT(m_compileOkay);
    
    if (!m_block)
        return;
    
    ASSERT(m_block->isReachable);
    
    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());
    
    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);
    
    // Log the flush format of every live variable at the head of the block
    // so OSR exit can recover them.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.
        
        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }
    
    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();
    
    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);
        
        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        if (ASSERT_DISABLED)
            m_canExit = true; // Essentially disable the assertions.
        else
            m_canExit = mayExit(m_jit.graph(), m_currentNode);
        
        m_interpreter.startExecuting();
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();
        if (!m_currentNode->shouldGenerate()) {
            // Dead nodes still need bookkeeping: some feed the minified
            // graph used by OSR exit, and hints record local-state changes.
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
                
            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;
                
            case MovHint:
                compileMovHint(m_currentNode);
                break;
                
            case ZombieHint: {
                recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {
            
            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
                dataLog("\n");
            }
            
            compile(m_currentNode);

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            m_jit.clearRegisterAllocationOffsets();
#endif

            if (!m_compileOkay) {
                bail(DFGBailedAtEndOfNode);
                return;
            }
            
            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }
        }
        
        // Make sure that the abstract state is rematerialized for the next node.
        m_interpreter.executeEffects(m_indexInBlock);
    }
    
    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}
1489
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
void SpeculativeJIT::checkArgumentTypes()
{
    // Emits a speculation check per live argument whose flush format is
    // narrower than FlushedJSValue, reading the value from its stack slot.
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }
        
        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();
        
        // FlushedJSValue accepts anything; nothing to check.
        if (format == FlushedJSValue)
            continue;
        
        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
        
#if USE(JSVALUE64)
        // 64-bit: check the NaN-boxed encoding directly in memory.
        switch (format) {
        case FlushedInt32: {
            // Int32s are tagged at or above tagTypeNumberRegister.
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            // XOR with ValueFalse must leave only the low bit (0 or 1).
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            // Cells have no tag bits set.
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // 32-bit: compare the tag word against the expected tag.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1562
1563 bool SpeculativeJIT::compile()
1564 {
1565     checkArgumentTypes();
1566     
1567     ASSERT(!m_currentNode);
1568     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1569         m_jit.setForBlockIndex(blockIndex);
1570         m_block = m_jit.graph().block(blockIndex);
1571         compileCurrentBlock();
1572     }
1573     linkBranches();
1574     return true;
1575 }
1576
1577 void SpeculativeJIT::createOSREntries()
1578 {
1579     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1580         BasicBlock* block = m_jit.graph().block(blockIndex);
1581         if (!block)
1582             continue;
1583         if (!block->isOSRTarget)
1584             continue;
1585         
1586         // Currently we don't have OSR entry trampolines. We could add them
1587         // here if need be.
1588         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1589     }
1590 }
1591
1592 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1593 {
1594     unsigned osrEntryIndex = 0;
1595     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1596         BasicBlock* block = m_jit.graph().block(blockIndex);
1597         if (!block)
1598             continue;
1599         if (!block->isOSRTarget)
1600             continue;
1601         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1602     }
1603     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1604 }
1605
// Emit a PutByVal storing a double into a double-typed JS array. The caller
// supplies the speculated base cell and strict-int32 index operands; the
// value (varArg child 2) and butterfly storage (varArg child 3) operands are
// materialized here.
void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
{
    Edge child3 = m_jit.graph().varArgChild(node, 2); // the value being stored
    Edge child4 = m_jit.graph().varArgChild(node, 3); // the butterfly storage

    ArrayMode arrayMode = node->arrayMode();
    
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    
    SpeculateDoubleOperand value(this, child3);

    FPRReg valueReg = value.fpr();
    
    // Speculate that the value is a "full real number": a NaN compares
    // unequal to itself, so this branch filters NaN payloads out.
    DFG_TYPE_CHECK(
        JSValueRegs(), child3, SpecFullRealNumber,
        m_jit.branchDouble(
            MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
    
    if (!m_compileOkay)
        return;
    
    StorageOperand storage(this, child4);
    GPRReg storageReg = storage.gpr();

    if (node->op() == PutByValAlias) {
        // Aliased store: no bounds check or length update is emitted here.
        // Store the value to the array.
        GPRReg propertyReg = property.gpr(); // NOTE(review): shadows the outer propertyReg with the same value
        FPRReg valueReg = value.fpr();
        m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
        
        noResult(m_currentNode);
        return;
    }
    
    GPRTemporary temporary;
    GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);

    MacroAssembler::Jump slowCase;
    
    if (arrayMode.isInBounds()) {
        // In-bounds speculation: OSR exit if index >= publicLength.
        speculationCheck(
            OutOfBounds, JSValueRegs(), 0,
            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
    } else {
        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
        
        // Index beyond vectorLength cannot be handled inline at all.
        slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
        
        if (!arrayMode.isOutOfBounds())
            speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
        
        // Appending within the vector: grow publicLength to index + 1.
        m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
        m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
        
        inBounds.link(&m_jit);
    }
    
    m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

    // Explicit use() calls because we pass UseChildrenCalledExplicitly below.
    base.use();
    property.use();
    value.use();
    storage.use();
    
    if (arrayMode.isOutOfBounds()) {
        // Beyond vectorLength: fall back to the C++ operation, picking the
        // strict/sloppy variant to match the code block's mode.
        addSlowPathGenerator(
            slowPathCall(
                slowCase, this,
                m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
                NoResult, baseReg, propertyReg, valueReg));
    }

    noResult(m_currentNode, UseChildrenCalledExplicitly);
}
1681
// Emit GetCharCodeAt: produce the int32 character code at the given index of
// a string whose storage has already been resolved (child3). The index is
// bounds-checked here with an OSR exit on failure.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();
    
    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // scratchReg first holds the string's value (StringImpl), then is reused
    // for the loaded character below.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    // 8-bit (latin1) storage: one byte per character.
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    // 16-bit (UTF-16 code unit) storage: two bytes per character.
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}
1716
// Emit GetByVal specialized for string bases: index into the string and
// produce the single-character string via the VM's small-string cache. When
// the array mode allows out-of-bounds access, the out-of-bounds branch calls
// a slow path and the node produces a generic JSValue; otherwise it produces
// a cell and out-of-bounds triggers an OSR exit.
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // On 32-bit we need a tag register, but only when the result can be a
    // generic JSValue (the out-of-bounds case).
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters >= 0x100 are not covered by the single-character string
    // cache and go to a slow path.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the singleCharacterStrings table: scale the character code
    // to pointer size (shift by 2 on 32-bit, 3 on 64-bit) and load the entry.
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
            // The String prototype chain is known sane, so an out-of-bounds
            // read can only produce undefined; use the cheaper slow path.
#if USE(JSVALUE64)
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
#else
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg));
#endif
        } else {
            // Prototype chain may intercept indexed access: use the generic
            // string get-by-val operation.
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }
        
#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
1810
// Emit String.fromCharCode for an int32 argument. The fast path looks up the
// VM's single-character string cache; character codes at or above 0xff, or a
// null (not-yet-populated) cache entry, fall back to the C++ operation.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    // Codes >= 0xff are outside the fast-path range.
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // A zero entry means the cached string hasn't been created yet.
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
1829
1830 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1831 {
1832     VirtualRegister virtualRegister = node->virtualRegister();
1833     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1834
1835     switch (info.registerFormat()) {
1836     case DataFormatStorage:
1837         RELEASE_ASSERT_NOT_REACHED();
1838
1839     case DataFormatBoolean:
1840     case DataFormatCell:
1841         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1842         return GeneratedOperandTypeUnknown;
1843
1844     case DataFormatNone:
1845     case DataFormatJSCell:
1846     case DataFormatJS:
1847     case DataFormatJSBoolean:
1848     case DataFormatJSDouble:
1849         return GeneratedOperandJSValue;
1850
1851     case DataFormatJSInt32:
1852     case DataFormatInt32:
1853         return GeneratedOperandInteger;
1854
1855     default:
1856         RELEASE_ASSERT_NOT_REACHED();
1857         return GeneratedOperandTypeUnknown;
1858     }
1859 }
1860
// Emit ValueToInt32, dispatching on the use kind of the child:
// - Int52RepUse (64-bit only): truncate to the low 32 bits.
// - DoubleRepUse: truncate, with a slow-path call when truncation fails.
// - NumberUse / NotCellUse: dispatch on the operand's current register
//   format; boxed values may need an unbox plus a toInt32 call.
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        // ToInt32 of an int52 is just truncation to the low 32 bits.
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        // Fast path: hardware truncation. When it fails (out of int32 range,
        // NaN, etc.), call the toInt32 operation on the slow path.
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
        
        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
        
        int32Result(gpr, node);
        return;
    }
    
    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            // Operand is already an int32: just move it.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // Values at or above the tagTypeNumber register hold an int32.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                // Speculate "is a number": any number has some tagTypeNumber
                // bit set.
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                // NotCellUse: non-numbers are allowed, but cells are not.
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
                
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
                
                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());
                
                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // Call the toInt32 operation; spill/fill around the call.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();
        
            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    // Tags below LowestTag are double tags; anything at or
                    // above is not a number.
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
                    
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        branchIsCell(op1.jsValueRegs()));
                    
                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());
                    
                    // Booleans: the payload is already 0 or 1.
                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());
                    
                    isNumber.link(&m_jit);
                }

                // Double case: reassemble the double and call toInt32.
                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32 already terminated speculation.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
    
    default:
        // Any other use kind means an earlier phase already failed.
        ASSERT(!m_compileOkay);
        return;
    }
}
2021
// Emit UInt32ToNumber: reinterpret an int32 register as an unsigned value.
// If the node's arith mode says overflow is expected, always produce a
// double; otherwise speculate that the value fits in int32 and OSR exit when
// the sign bit is set.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.
            
        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);
            
        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();
            
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
            
        // A negative signed view means the unsigned value was >= 2^31; the
        // signed conversion is off by exactly 2^32, so add it back.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);
            
        doubleResult(outputFPR, node);
        return;
    }
    
    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    // Speculate the uint32 fits in int32: a set sign bit means it doesn't.
    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    int32Result(result.gpr(), node, op1.format());
}
2055
// Emit DoubleAsInt32: convert a double to an int32, OSR exiting when the
// conversion is lossy (and, if the arith mode requires it, on negative zero).
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);
    
    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    // This node only exists in modes that check overflow.
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), 0, failureCases);

    int32Result(resultGPR, node);
}
2075
// Emit DoubleRep: convert the child to an unboxed double representation.
// For NumberUse the child may be an int32 (cheap conversion) or a boxed
// JSValue (type-checked then unboxed); for Int52RepUse (64-bit only) it is
// an int64-to-double conversion.
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case NumberUse: {
        ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
    
        // If abstract interpretation proved the value is an int32, convert
        // directly without any boxed-value handling.
        if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }
    
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);
    
#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
    
        // Values at or above the tagTypeNumber register hold an int32.
        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
    
        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }
    
        // Boxed double: copy and unbox.
        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        JITCompiler::Jump done = m_jit.jump();
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);
    
        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
    
        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
    
        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            // Tags at or above LowestTag are non-double, non-int32 values.
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }
    
        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        JITCompiler::Jump done = m_jit.jump();
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)
    
        doubleResult(resultFPR, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);
        
        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2165
// Emit ValueRep: box the child (a double or, on 64-bit, an int52) back into
// a generic JSValue representation.
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);
        
        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();
        
        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);
        
        jsValueResult(resultRegs, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();
        
        // Box from the strict (unshifted) int52 format.
        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
        
        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2209
// Clamp a double to the byte range used by clamped typed-array stores.
// Adds 0.5 up front (round-half-up ahead of the caller's truncation), maps
// anything not greater than zero -- including NaN -- to 0, and anything
// above 255 to 255. The result may still carry a fractional part.
static double clampDoubleToByte(double d)
{
    double shifted = d + 0.5;
    if (shifted > 255)
        return 255;
    if (shifted > 0)
        return shifted;
    return 0; // Non-positive values and NaN both land here.
}
2219
// Emit code that clamps the int32 in |result| to [0, 255] in place.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    // Unsigned <= 0xff catches exactly the values already in range.
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    // Signed > 0xff separates too-big from negative.
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    jit.xorPtr(result, result); // Negative: clamp to 0.
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result); // Too big: clamp to 255.
    clamped.link(&jit);
    inBounds.link(&jit);
}
2231
// Emit code that clamps the double in |source| to a byte value in |result|:
// values <= 0 (or NaN) become 0, values > 255 become 255, everything else is
// rounded by adding 0.5 and truncating to int32.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
    
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);   
    MacroAssembler::Jump truncatedInt = jit.jump();
    
    tooSmall.link(&jit);
    jit.xorPtr(result, result); // <= 0 or NaN: result is 0.
    MacroAssembler::Jump zeroed = jit.jump();
    
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result); // > 255: result is 255.
    
    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
2261
2262 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2263 {
2264     if (node->op() == PutByValAlias)
2265         return JITCompiler::Jump();
2266     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2267         uint32_t length = view->length();
2268         Node* indexNode = m_jit.graph().child(node, 1).node();
2269         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2270             return JITCompiler::Jump();
2271         return m_jit.branch32(
2272             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2273     }
2274     return m_jit.branch32(
2275         MacroAssembler::AboveOrEqual, indexGPR,
2276         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2277 }
2278
2279 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2280 {
2281     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2282     if (!jump.isSet())
2283         return;
2284     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2285 }
2286
// Emit GetByVal on an integer typed array: bounds-check, load the element at
// the appropriate width/signedness, then choose a result representation.
// Unsigned 32-bit loads may not fit in int32, so they can produce an int32
// (with an overflow speculation), an int52 (64-bit only), or a double.
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    // Load at the element width, sign- or zero-extending as the type requires.
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Anything narrower than 32 bits, or signed 32-bit, always fits in int32.
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }
    
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        // Speculate the uint32 fits in int32; OSR exit if the sign bit is set.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }
    
#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        // Any uint32 fits in an int52 after zero-extension.
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif
    
    // Fall back to a double. The signed conversion is off by 2^32 when the
    // sign bit was set, so correct it in that case.
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2351
// Emits a put-by-val store into an integer typed array (Int8/16/32, Uint8/16/32,
// Uint8Clamped). The value child is materialized as an int32 in a temporary GPR
// (with byte clamping for Uint8Clamped), then stored at elementSize granularity.
// Out-of-bounds indices either fail speculation (in-bounds array modes) or jump
// over the store so the write is silently dropped.
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;
    
    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            // A constant non-number value should have been caught earlier; treat
            // reaching here as a speculation failure.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        // Fold the clamp/truncation at compile time for constant values.
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
            
#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                // Clamp a strict int52 to [0, 255]:
                //  - unsigned <= 0xff: already in range, skip clamping;
                //  - signed   >  0xff: too big, clamp to 255;
                //  - otherwise (negative): falls through to the 0 case.
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)
            
        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN stores as 0; otherwise try a fast truncation and fall back
                // to the toInt32 slow path if the hardware truncate fails.
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);
                
                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                
                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
                
                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    // The value temporary must not alias any of the other registers we use below.
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds array modes promise the index is valid, so an out-of-bounds
        // index is a speculation failure rather than a silent no-op.
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2487
// Loads an element from a floating-point typed array (Float32 or Float64) and
// produces a double result; 32-bit loads are widened to double. The index is
// bounds-checked via emitTypedArrayBoundsCheck() before the load.
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    doubleResult(resultReg, node);
}
2520
2521 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2522 {
2523     ASSERT(isFloat(type));
2524     
2525     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2526     GPRReg storageReg = storage.gpr();
2527     
2528     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2529     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2530
2531     SpeculateDoubleOperand valueOp(this, valueUse);
2532     FPRTemporary scratch(this);
2533     FPRReg valueFPR = valueOp.fpr();
2534     FPRReg scratchFPR = scratch.fpr();
2535
2536     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2537     
2538     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2539     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2540         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2541         outOfBounds = MacroAssembler::Jump();
2542     }
2543     
2544     switch (elementSize(type)) {
2545     case 4: {
2546         m_jit.moveDouble(valueFPR, scratchFPR);
2547         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2548         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2549         break;
2550     }
2551     case 8:
2552         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2553         break;
2554     default:
2555         RELEASE_ASSERT_NOT_REACHED();
2556     }
2557     if (outOfBounds.isSet())
2558         outOfBounds.link(&m_jit);
2559     noResult(node);
2560 }
2561
// Emits the prototype-chain walk for instanceof when the value is known to be
// a cell. Leaves a blessed boolean (true/false JSValue on 64-bit, 0/1 payload
// on 32-bit) in scratchReg for the caller to hand to blessedBooleanResult().
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
    
    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);
    
    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    // Keep walking while the loaded prototype is still a cell; a non-cell
    // (null on 32-bit is a zero payload) terminates the chain.
#if USE(JSVALUE64)
    branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
    
    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();
    
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
    
    putResult.link(&m_jit);
}
2598
// Compiles the InstanceOf node. For UntypedUse the value may not be a cell, so
// non-cells short-circuit to false; otherwise both children are speculated as
// cells and the shared prototype-chain walk is emitted.
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.
        
        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);
        
        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();
        
        // Non-cell values are never instances of anything: produce false and
        // jump over the chain walk.
        MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();
        
        isCell.link(&m_jit);
        
        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
        
        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }
    
    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    
    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();
    
    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
2646
// Compiles ValueAdd/ArithAdd for the use kinds fixup can produce: Int32 (with
// optional overflow checks and constant-immediate fast paths), Int52 (64-bit
// only), and Double. Addition never needs a negative-zero check in Int32.
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        // Constant-left fast path: add an immediate to the other operand.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
        
        // Constant-right fast path, symmetric to the above.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);
                
            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
                
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
                
            // If the result reuses an input register, record a recovery so OSR
            // exit can undo the in-place add and recover the original operand.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)
    
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
2761
// Compiles MakeRope: inline-allocates a JSRopeString from two or three string
// fibers, combining their flags (8-bit-ness) and lengths. Allocation failure
// or length overflow falls back to the operationMakeRope2/3 slow paths.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
    
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    
    // Inline-allocate the rope cell; jumps to slowPath if the allocator's free
    // list is empty.
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
        
    // Null value pointer marks this as a rope; fill used fibers, zero the rest.
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // scratchGPR accumulates the AND of all fiber flags; allocatorGPR (now free)
    // accumulates the total length.
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        // Combined length overflowing int32 fails speculation.
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    // The rope is 8-bit only if every fiber is 8-bit.
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
        
    cellResult(resultGPR, node);
}
2842
2843 void SpeculativeJIT::compileArithSub(Node* node)
2844 {
2845     switch (node->binaryUseKind()) {
2846     case Int32Use: {
2847         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2848         
2849         if (node->child2()->isNumberConstant()) {
2850             SpeculateInt32Operand op1(this, node->child1());
2851             int32_t imm2 = node->child2()->asInt32();
2852             GPRTemporary result(this);
2853
2854             if (!shouldCheckOverflow(node->arithMode())) {
2855                 m_jit.move(op1.gpr(), result.gpr());
2856                 m_jit.sub32(Imm32(imm2), result.gpr());
2857             } else {
2858                 GPRTemporary scratch(this);
2859                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2860             }
2861
2862             int32Result(result.gpr(), node);
2863             return;
2864         }
2865             
2866         if (node->child1()->isNumberConstant()) {
2867             int32_t imm1 = node->child1()->asInt32();
2868             SpeculateInt32Operand op2(this, node->child2());
2869             GPRTemporary result(this);
2870                 
2871             m_jit.move(Imm32(imm1), result.gpr());
2872             if (!shouldCheckOverflow(node->arithMode()))
2873                 m_jit.sub32(op2.gpr(), result.gpr());
2874             else
2875                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2876                 
2877             int32Result(result.gpr(), node);
2878             return;
2879         }
2880             
2881         SpeculateInt32Operand op1(this, node->child1());
2882         SpeculateInt32Operand op2(this, node->child2());
2883         GPRTemporary result(this);
2884
2885         if (!shouldCheckOverflow(node->arithMode())) {
2886             m_jit.move(op1.gpr(), result.gpr());
2887             m_jit.sub32(op2.gpr(), result.gpr());
2888         } else
2889             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2890
2891         int32Result(result.gpr(), node);
2892         return;
2893     }
2894         
2895 #if USE(JSVALUE64)
2896     case Int52RepUse: {
2897         ASSERT(shouldCheckOverflow(node->arithMode()));
2898         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2899
2900         // Will we need an overflow check? If we can prove that neither input can be
2901         // Int52 then the overflow check will not be necessary.
2902         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2903             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2904             SpeculateWhicheverInt52Operand op1(this, node->child1());
2905             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2906             GPRTemporary result(this, Reuse, op1);
2907             m_jit.move(op1.gpr(), result.gpr());
2908             m_jit.sub64(op2.gpr(), result.gpr());
2909             int52Result(result.gpr(), node, op1.format());
2910             return;
2911         }
2912         
2913         SpeculateInt52Operand op1(this, node->child1());
2914         SpeculateInt52Operand op2(this, node->child2());
2915         GPRTemporary result(this);
2916         m_jit.move(op1.gpr(), result.gpr());
2917         speculationCheck(
2918             Int52Overflow, JSValueRegs(), 0,
2919             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2920         int52Result(result.gpr(), node);
2921         return;
2922     }
2923 #endif // USE(JSVALUE64)
2924
2925     case DoubleRepUse: {
2926         SpeculateDoubleOperand op1(this, node->child1());
2927         SpeculateDoubleOperand op2(this, node->child2());
2928         FPRTemporary result(this, op1);
2929
2930         FPRReg reg1 = op1.fpr();
2931         FPRReg reg2 = op2.fpr();
2932         m_jit.subDouble(reg1, reg2, result.fpr());
2933
2934         doubleResult(result.fpr(), node);
2935         return;
2936     }
2937         
2938     default:
2939         RELEASE_ASSERT_NOT_REACHED();
2940         return;
2941     }
2942 }
2943
// Compiles ArithNegate for Int32 (with optional overflow and negative-zero
// checks), Int52 (64-bit only), and Double use kinds.
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.
        
        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // (value & 0x7fffffff) == 0 iff value is 0 or INT32_MIN: negating 0
            // would produce -0 and negating INT32_MIN overflows, so one test
            // covers both exits.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // If the input provably fits in int32, 64-bit negation cannot overflow,
        // so only the negative-zero check (result == 0) may be needed.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);
        
        m_jit.negateDouble(op1.fpr(), result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
// Compiles ArithMul for Int32 (with optional overflow and negative-zero
// checks), Int52 (64-bit only, using the shifted-operand overflow trick
// described below), and Double use kinds.
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }
            
        // Check for negative zero, if the users of this node care about such things.
        // A zero product with either operand negative means the true result is -0.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int32Result(result.gpr(), node);
        return;
    }
    
#if USE(JSVALUE64)   
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 16
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
        // bits.
        //
        // NOTE(review): the shift amounts quoted above are internally inconsistent
        // ("(b << 12)"/"high 12 bits" vs "low 16 bits"/"by 16 bits"); verify the
        // actual shifted-Int52 format constant against JSValue/Int52 definitions.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.

        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();
        
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
        
        // Same negative-zero handling as the Int32 case, on 64-bit values.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }
        
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        
        m_jit.mulDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3134
// Compiles an ArithDiv node. For Int32Use operands this emits an inline
// hardware division (x86 idiv / ARM sdiv) guarded by speculation checks for
// everything int32 division cannot represent: a zero denominator, the
// -2^31 / -1 overflow, negative zero, and (in checked mode) a non-zero
// remainder, which forces an OSR exit so the operation can be redone as a
// double division. For DoubleRepUse it is a plain double divide.
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        // idiv implicitly uses eax (dividend in, quotient out) and edx (sign
        // extension in, remainder out), so pin both as temporaries up front.
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
    
        // Pick a scratch ('temp') that aliases neither operand. If op2 itself
        // lives in eax or edx it will be clobbered by the division, so also
        // allocate a register (op2TempGPR) to relocate it into later.
        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }
    
        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
    
        // temp = op2 + 1, then unsigned-compare against 1. This single test
        // fails exactly for op2 == 0 and op2 == -1 — the two denominators
        // that need special handling below.
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
    
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
    
        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            // Checked mode: exit on a zero denominator, and on the one
            // dividend (-2^31) whose division by -1 overflows int32 (and
            // would fault idiv).
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.
        
            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());
        
            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            // -2^31 / -1: the answer is the dividend itself; materialize it in
            // eax where the result is expected.
            m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
            done.append(m_jit.jump());
        
            notNeg2ToThe31.link(&m_jit);
        }
    
        safeDenominator.link(&m_jit);
    
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
    
        // Move op2 out of eax/edx now (see allocation above) so idiv does not
        // clobber it before use.
        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }
            
        // eax = op1; cdq sign-extends eax into edx; idiv leaves quotient in
        // eax and remainder in edx.
        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);
            
        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
        
        done.link(&m_jit);
        int32Result(eax.gpr(), node);
#elif CPU(APPLE_ARMV7S) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        // ARM sdiv does not trap on divide-by-zero; the remainder check below
        // also catches the zero-denominator case in checked mode.
        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead. (quotient * op2 == op1 iff the
        // division was exact; the multiply-overflow check also covers
        // -2^31 / -1.)
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }

        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        break;
    }
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.divDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
3271
// Compiles an ArithMod node. Int32Use has three tiers: a branch-free bit
// trick when the divisor is a power-of-two constant, a simplified x86 path
// for other non-zero/non-(-1) constant divisors, and a fully general inline
// division (x86 idiv / ARM sdiv + multiply-subtract) otherwise. Negative
// zero (e.g. -0 for (-4 % 2)) cannot be represented in int32, so a
// zero result with a negative dividend triggers an OSR exit when the arith
// mode demands it. DoubleRepUse calls out to fmod.
void SpeculativeJIT::compileArithMod(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        // In the fast path, the dividend value could be the final result
        // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
        SpeculateStrictInt32Operand op1(this, node->child1());
        
        // Tier 1: constant power-of-two divisor — no division instruction at all.
        if (node->child2()->isInt32Constant()) {
            int32_t divisor = node->child2()->asInt32();
            if (divisor > 1 && hasOneBitSet(divisor)) {
                unsigned logarithm = WTF::fastLog2(divisor);
                GPRReg dividendGPR = op1.gpr();
                GPRTemporary result(this);
                GPRReg resultGPR = result.gpr();

                // This is what LLVM generates. It's pretty crazy. Here's my
                // attempt at understanding it.
                
                // First, compute either divisor - 1, or 0, depending on whether
                // the dividend is negative:
                //
                // If dividend < 0:  resultGPR = divisor - 1
                // If dividend >= 0: resultGPR = 0
                m_jit.move(dividendGPR, resultGPR);
                m_jit.rshift32(TrustedImm32(31), resultGPR);
                m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
                
                // Add in the dividend, so that:
                //
                // If dividend < 0:  resultGPR = dividend + divisor - 1
                // If dividend >= 0: resultGPR = dividend
                m_jit.add32(dividendGPR, resultGPR);
                
                // Mask so as to only get the *high* bits. This rounds down
                // (towards negative infinity) resultGPR to the nearest multiple
                // of divisor, so that:
                //
                // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that this can be simplified to:
                //
                // If dividend < 0:  resultGPR = ceil(dividend / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that if the dividend is negative, resultGPR will also be negative.
                // Regardless of the sign of dividend, resultGPR will be rounded towards
                // zero, because of how things are conditionalized.
                m_jit.and32(TrustedImm32(-divisor), resultGPR);
                
                // Subtract resultGPR from dividendGPR, which yields the remainder:
                //
                // resultGPR = dividendGPR - resultGPR
                m_jit.neg32(resultGPR);
                m_jit.add32(dividendGPR, resultGPR);
                
                if (shouldCheckNegativeZero(node->arithMode())) {
                    // Check that we're not about to create negative zero.
                    JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
                    speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
                    numeratorPositive.link(&m_jit);
                }

                int32Result(resultGPR, node);
                return;
            }
        }
        
#if CPU(X86) || CPU(X86_64)
        // Tier 2 (x86 only): any other constant divisor except 0 and -1 —
        // the denominator checks of the general path are statically known to
        // pass, so go straight to idiv.
        if (node->child2()->isInt32Constant()) {
            int32_t divisor = node->child2()->asInt32();
            if (divisor && divisor != -1) {
                GPRReg op1Gpr = op1.gpr();

                GPRTemporary eax(this, X86Registers::eax);
                GPRTemporary edx(this, X86Registers::edx);
                GPRTemporary scratch(this);
                GPRReg scratchGPR = scratch.gpr();

                // idiv clobbers eax/edx; if the dividend lives there, save it
                // so the negative-zero check below can still read it.
                GPRReg op1SaveGPR;
                if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
                    op1SaveGPR = allocate();
                    ASSERT(op1Gpr != op1SaveGPR);
                    m_jit.move(op1Gpr, op1SaveGPR);
                } else
                    op1SaveGPR = op1Gpr;
                ASSERT(op1SaveGPR != X86Registers::eax);
                ASSERT(op1SaveGPR != X86Registers::edx);

                // eax = dividend; cdq sign-extends into edx; after idiv the
                // remainder is in edx.
                m_jit.move(op1Gpr, eax.gpr());
                m_jit.move(TrustedImm32(divisor), scratchGPR);
                m_jit.assembler().cdq();
                m_jit.assembler().idivl_r(scratchGPR);
                if (shouldCheckNegativeZero(node->arithMode())) {
                    JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
                    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
                    numeratorPositive.link(&m_jit);
                }
            
                if (op1SaveGPR != op1Gpr)
                    unlock(op1SaveGPR);

                int32Result(edx.gpr(), node);
                return;
            }
        }
#endif

        // Tier 3: general case with a dynamic divisor.
        SpeculateInt32Operand op2(this, node->child2());
#if CPU(X86) || CPU(X86_64)
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
    
        GPRReg op2TempGPR;
        GPRReg temp;
        GPRReg op1SaveGPR;
    
        // Same register shuffle as compileArithDiv: keep a scratch that
        // aliases neither operand, and relocate op2 if it sits in eax/edx.
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }
    
        // Preserve the dividend across idiv for the negative-zero check.
        if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
            op1SaveGPR = allocate();
            ASSERT(op1GPR != op1SaveGPR);
            m_jit.move(op1GPR, op1SaveGPR);
        } else
            op1SaveGPR = op1GPR;
    
        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
        ASSERT(op1SaveGPR != X86Registers::eax);
        ASSERT(op1SaveGPR != X86Registers::edx);
    
        // temp = op2 + 1; the unsigned compare fails only for op2 == 0 or
        // op2 == -1, the denominators idiv cannot handle directly.
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
    
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
    
        JITCompiler::JumpList done;
        
        // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
        // separate case for that. But it probably doesn't matter so much.
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
            // happy to fall through to a normal division, since we're just dividing something
            // by negative 1.
        
            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());
        
            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());
        
            notNeg2ToThe31.link(&m_jit);
        }
        
        safeDenominator.link(&m_jit);
            
        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }
            
        // The remainder lands in edx after idiv.
        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);
            
        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that we're not about to create negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
            numeratorPositive.link(&m_jit);
        }
    
        if (op1SaveGPR != op1GPR)
            unlock(op1SaveGPR);
            
        done.link(&m_jit);
        int32Result(edx.gpr(), node);

#elif CPU(ARM64) || CPU(APPLE_ARMV7S)
        // ARM has no remainder instruction: compute the quotient with sdiv,
        // then remainder = dividend - quotient * divisor.
        GPRTemporary temp(this);
        GPRTemporary quotientThenRemainder(this);
        GPRTemporary multiplyAnswer(this);
        GPRReg dividendGPR = op1.gpr();
        GPRReg divisorGPR = op2.gpr();
        GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
        GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();

        JITCompiler::JumpList done;
    
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
        else {
            // Unchecked mode: x % 0 produces 0 (the divisor register is
            // known zero here, so moving it materializes that 0).
            JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
            m_jit.move(divisorGPR, quotientThenRemainderGPR);
            done.append(m_jit.jump());
            denominatorNotZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
        // FIXME: It seems like there are cases where we don't need this? What if we have
        // arithMode() == Arith::Unchecked?
        // https://bugs.webkit.org/show_bug.cgi?id=126444
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
#if CPU(APPLE_ARMV7S)
        m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#else
        m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#endif

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            // Check that we're not about to create negative zero.
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
            numeratorPositive.link(&m_jit);
        }

        done.link(&m_jit);

        int32Result(quotientThenRemainderGPR, node);
#else // not architecture that can do integer division
        RELEASE_ASSERT_NOT_REACHED();
#endif
        return;
    }
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        
        FPRReg op1FPR = op1.fpr();
        FPRReg op2FPR = op2.fpr();
        
        // fmod is a call, so all live registers must be flushed first.
        flushRegisters();
        
        FPRResult result(this);
        
        callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3545
3546 // Returns true if the compare is fused with a subsequent branch.
3547 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3548 {
3549     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3550         return true;
3551
3552     if (node->isBinaryUseKind(Int32Use)) {
3553         compileInt32Compare(node, condition);
3554         return false;
3555     }
3556     
3557 #if USE(JSVALUE64)
3558     if (node->isBinaryUseKind(Int52RepUse)) {
3559         compileInt52Compare(node, condition);
3560         return false;
3561     }
3562 #endif // USE(JSVALUE64)
3563     
3564     if (node->isBinaryUseKind(DoubleRepUse)) {
3565         compileDoubleCompare(node, doubleCondition);
3566         return false;
3567     }
3568     
3569     if (node->op() == CompareEq) {
3570         if (node->isBinaryUseKind(StringUse)) {
3571             compileStringEquality(node);
3572             return false;
3573         }
3574         
3575         if (node->isBinaryUseKind(BooleanUse)) {
3576             compileBooleanCompare(node, condition);
3577             return false;
3578         }
3579
3580         if (node->isBinaryUseKind(StringIdentUse)) {
3581             compileStringIdentEquality(node);
3582             return false;
3583         }
3584         
3585         if (node->isBinaryUseKind(ObjectUse)) {
3586             compileObjectEquality(node);
3587             return false;
3588         }
3589         
3590         if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3591             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3592             return false;
3593         }
3594         
3595         if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3596             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3597             return false;
3598         }
3599     }
3600     
3601     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3602     return false;
3603 }
3604
3605 bool SpeculativeJIT::compileStrictEq(Node* node)
3606 {
3607     if (node->isBinaryUseKind(BooleanUse)) {
3608         unsigned branchIndexInBlock = detectPeepHoleBranch();
3609         if (branchIndexInBlock != UINT_MAX) {
3610             Node* branchNode = m_block->at(branchIndexInBlock);
3611             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3612             use(node->child1());
3613             use(node->child2());
3614             m_indexInBlock = branchIndexInBlock;
3615             m_currentNode = branchNode;
3616             return true;
3617         }
3618         compileBooleanCompare(node, MacroAssembler::Equal);
3619         return false;
3620     }
3621
3622     if (node->isBinaryUseKind(Int32Use)) {
3623         unsigned branchIndexInBlock = detectPeepHoleBranch();
3624         if (branchIndexInBlock != UINT_MAX) {
3625             Node* branchNode = m_block->at(branchIndexInBlock);
3626             compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3627             use(node->child1());
3628             use(node->child2());
3629             m_indexInBlock = branchIndexInBlock;
3630             m_currentNode = branchNode;
3631             return true;
3632         }
3633         compileInt32Compare(node, MacroAssembler::Equal);
3634         return false;
3635     }
3636     
3637 #if USE(JSVALUE64)   
3638     if (node->isBinaryUseKind(Int52RepUse)) {
3639         unsigned branchIndexInBlock = detectPeepHoleBranch();
3640         if (branchIndexInBlock != UINT_MAX) {
3641             Node* branchNode = m_block->at(branchIndexInBlock);
3642             compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3643             use(node->child1());
3644             use(node->child2());
3645             m_indexInBlock = branchIndexInBlock;
3646             m_currentNode = branchNode;
3647             return true;
3648         }
3649         compileInt52Compare(node, MacroAssembler::Equal);
3650         return false;
3651     }
3652 #endif // USE(JSVALUE64)
3653
3654     if (node->isBinaryUseKind(DoubleRepUse)) {
3655         unsigned branchIndexInBlock = detectPeepHoleBranch();
3656         if (branchIndexInBlock != UINT_MAX) {
3657             Node* branchNode = m_block->at(branchIndexInBlock);
3658             compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3659             use(node->child1());
3660             use(node->child2());
3661             m_indexInBlock = branchIndexInBlock;
3662             m_currentNode = branchNode;
3663             return true;
3664         }
3665         compileDoubleCompare(node, MacroAssembler::DoubleEqual);
3666         return false;
3667     }
3668     
3669     if (node->isBinaryUseKind(StringUse)) {
3670         compileStringEquality(node);
3671         return false;
3672     }
3673     
3674     if (node->isBinaryUseKind(StringIdentUse)) {
3675         compileStringIdentEquality(node);
3676         return false;
3677     }
3678
3679     if (node->isBinaryUseKind(ObjectUse)) {
3680         unsigned branchIndexInBlock = detectPeepHoleBranch();
3681         if (branchIndexInBlock != UINT_MAX) {
3682             Node* branchNode = m_block->at(branchIndexInBlock);
3683             compilePeepHoleObjectEquality(node, branchNode);
3684             use(node->child1());
3685             use(node->child2());
3686             m_indexInBlock = branchIndexInBlock;
3687             m_currentNode = branchNode;
3688             return true;
3689         }
3690         compileObjectEquality(node);
3691         return false;
3692     }
3693
3694     if (node->isBinaryUseKind(MiscUse, UntypedUse)
3695         || node->isBinaryUseKind(UntypedUse, MiscUse)) {
3696         compileMiscStrictEq(node);
3697         return false;
3698     }
3699     
3700     if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
3701         compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
3702         return false;
3703     }
3704     
3705     if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
3706         compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
3707         return false;
3708     }
3709     
3710     if (node->isBinaryUseKind(StringUse, UntypedUse)) {
3711         compileStringToUntypedEquality(node, node->child1(), node->child2());
3712         return false;
3713     }
3714     
3715     if (node->isBinaryUseKind(UntypedUse, StringUse)) {
3716         compileStringToUntypedEquality(node, node->child2(), node->child1());
3717         return false;
3718     }
3719     
3720     RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
3721     return nonSpeculativeStrictEq(node);
3722 }
3723
3724 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3725 {
3726     SpeculateBooleanOperand op1(this, node->child1());
3727     SpeculateBooleanOperand op2(this, node->child2());
3728     GPRTemporary result(this);
3729     
3730     m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3731     
3732     unblessedBooleanResult(result.gpr(), node);
3733 }
3734
3735 void SpeculativeJIT::compileStringEquality(
3736     Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
3737     GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
3738     JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
3739 {
3740     JITCompiler::JumpList trueCase;
3741     JITCompiler::JumpList falseCase;
3742     JITCompiler::JumpList slowCase;
3743     
3744     trueCase.append(fastTrue);
3745     falseCase.append(fastFalse);
3746
3747     m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
3748     
3749     falseCase.append(m_jit.branch32(
3750         MacroAssembler::NotEqual,
3751         MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
3752         lengthGPR));
3753     
3754     trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
3755     
3756     m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
3757     m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
3758     
3759     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
3760     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
3761     
3762     slowCase.append(m_jit.branchTest32(
3763         MacroAssembler::Zero,
3764         MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
3765         TrustedImm32(StringImpl::flagIs8Bit())));
3766     slowCase.append(m_jit.branchTest32(
3767         MacroAssembler::Zero,
3768         MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
3769         TrustedImm32(StringImpl::flagIs8Bit())));
3770     
3771     m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
3772     m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
3773     
3774     MacroAssembler::Label loop = m_jit.label();
3775     
3776     m_jit.sub32(TrustedImm32(1), lengthGPR);
3777
3778     // This isn't going to generate the best code on x86. But that's OK, it's still better
3779     // than not inlining.
3780     m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
3781     m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
3782     falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
3783     
3784     m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
3785     
3786     trueCase.link(&m_jit);
3787     moveTrueTo(leftTempGPR);
3788     
3789     JITCompiler::Jump done = m_jit.jump();
3790
3791     falseCase.link(&m_jit);
3792     moveFalseTo(leftTempGPR);
3793     
3794     done.link(&m_jit);
3795     addSlowPathGenerator(