Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "BinarySwitch.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGArrayifySlowPathGenerator.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
44
45 namespace JSC { namespace DFG {
46
47 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
48     : m_compileOkay(true)
49     , m_jit(jit)
50     , m_currentNode(0)
51     , m_lastGeneratedNode(LastNodeType)
52     , m_indexInBlock(0)
53     , m_generationInfo(m_jit.graph().frameRegisterCount())
54     , m_state(m_jit.graph())
55     , m_interpreter(m_jit.graph(), m_state)
56     , m_stream(&jit.jitCode()->variableEventStream)
57     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
58     , m_isCheckingArgumentTypes(false)
59 {
60 }
61
62 SpeculativeJIT::~SpeculativeJIT()
63 {
64 }
65
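// emitAllocateJSArray: inline fast path for allocating a JSArray together with its butterfly.
// The indexed storage is bump-allocated first (emitAllocateBasicStorage), then the JSArray cell
// itself; any failure lands in slowCases, which falls back to operationNewArrayWithSize. For
// double arrays the unused tail of the vector is pre-filled with PNaN, the hole value.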
66 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
67 {
68     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
69     
70     GPRTemporary scratch(this);
71     GPRTemporary scratch2(this);
72     GPRReg scratchGPR = scratch.gpr();
73     GPRReg scratch2GPR = scratch2.gpr();
74     
75     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
76     
77     JITCompiler::JumpList slowCases;
78     
79     slowCases.append(
80         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
81     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
82     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
83     
84     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
85     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
86     
87     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
88 #if USE(JSVALUE64)
89         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
90         for (unsigned i = numElements; i < vectorLength; ++i)
91             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
92 #else
93         EncodedValueDescriptor value;
94         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
95         for (unsigned i = numElements; i < vectorLength; ++i) {
96             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
97             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
98         }
99 #endif
100     }
101     
102     // I want a slow path that also loads out the storage pointer, and that's
103     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
104     // of work for a very small piece of functionality. :-/
105     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
106             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
107             structure, numElements));
108 }
109
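// emitAllocateArguments: inline allocation of an Arguments object whose size is derived from the
// live ArgumentCount slot. The caller-supplied slowPath is taken if the variable-sized cell
// allocation fails; on the fast path every field (activation, numArguments, overrodeLength,
// strict-mode flag, registers pointer, slow-argument data, callee) is initialized here.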
110 void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
111 {
112     Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
113
114     m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
115     m_jit.mul32(TrustedImm32(sizeof(JSValue)), scratchGPR1, scratchGPR1);
116     m_jit.add32(TrustedImm32(Arguments::offsetOfInlineRegisterArray()), scratchGPR1);
117     emitAllocateVariableSizedJSObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR1, scratchGPR2, slowPath);
118
119     m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));
120
121     m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
122     m_jit.sub32(TrustedImm32(1), scratchGPR1);
123     m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));
124
125     m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
126     if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
127         m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));
128
129     m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
130     m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));
131
132     m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
133     m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
134
135 }
136
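// The speculationCheck() family records an OSR exit: the supplied jump(s) are wired up to exit
// code that reconstructs bytecode state from the variable event stream captured so far
// (m_stream->size()) and then resumes execution in lower-tier code.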
137 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
138 {
139     if (!m_compileOkay)
140         return;
141     ASSERT(m_isCheckingArgumentTypes || m_canExit);
142     m_jit.appendExitInfo(jumpToFail);
143     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
144 }
145
146 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
147 {
148     if (!m_compileOkay)
149         return;
150     ASSERT(m_isCheckingArgumentTypes || m_canExit);
151     m_jit.appendExitInfo(jumpsToFail);
152     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
153 }
154
155 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
156 {
157     if (!m_compileOkay)
158         return OSRExitJumpPlaceholder();
159     ASSERT(m_isCheckingArgumentTypes || m_canExit);
160     unsigned index = m_jit.jitCode()->osrExit.size();
161     m_jit.appendExitInfo();
162     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
163     return OSRExitJumpPlaceholder(index);
164 }
165
166 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
167 {
168     ASSERT(m_isCheckingArgumentTypes || m_canExit);
169     return speculationCheck(kind, jsValueSource, nodeUse.node());
170 }
171
172 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
173 {
174     ASSERT(m_isCheckingArgumentTypes || m_canExit);
175     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
176 }
177
178 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
179 {
180     ASSERT(m_isCheckingArgumentTypes || m_canExit);
181     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
182 }
183
184 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
185 {
186     if (!m_compileOkay)
187         return;
188     ASSERT(m_isCheckingArgumentTypes || m_canExit);
189     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
190     m_jit.appendExitInfo(jumpToFail);
191     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
192 }
193
194 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
195 {
196     ASSERT(m_isCheckingArgumentTypes || m_canExit);
197     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
198 }
199
200 void SpeculativeJIT::emitInvalidationPoint(Node* node)
201 {
202     if (!m_compileOkay)
203         return;
204     ASSERT(m_canExit);
205     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
206     m_jit.jitCode()->appendOSRExit(OSRExit(
207         UncountableInvalidation, JSValueSource(),
208         m_jit.graph().methodOfGettingAValueProfileFor(node),
209         this, m_stream->size()));
210     info.m_replacementSource = m_jit.watchpointLabel();
211     ASSERT(info.m_replacementSource.isSet());
212     noResult(node);
213 }
214
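// terminateSpeculativeExecution: emit an unconditional OSR exit and clear m_compileOkay so that
// no further code is generated along this path.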
215 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
216 {
217     ASSERT(m_isCheckingArgumentTypes || m_canExit);
218     if (!m_compileOkay)
219         return;
220     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
221     m_compileOkay = false;
222     if (verboseCompilationEnabled())
223         dataLog("Bailing compilation.\n");
224 }
225
226 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
227 {
228     ASSERT(m_isCheckingArgumentTypes || m_canExit);
229     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
230 }
231
232 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
233 {
234     ASSERT(needsTypeCheck(edge, typesPassedThrough));
235     m_interpreter.filter(edge, typesPassedThrough);
236     speculationCheck(BadType, source, edge.node(), jumpToFail);
237 }
238
239 RegisterSet SpeculativeJIT::usedRegisters()
240 {
241     RegisterSet result;
242     
243     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
244         GPRReg gpr = GPRInfo::toRegister(i);
245         if (m_gprs.isInUse(gpr))
246             result.set(gpr);
247     }
248     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
249         FPRReg fpr = FPRInfo::toRegister(i);
250         if (m_fprs.isInUse(fpr))
251             result.set(fpr);
252     }
253     
254     result.merge(RegisterSet::specialRegisters());
255     
256     return result;
257 }
258
259 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
260 {
261     m_slowPathGenerators.append(WTF::move(slowPathGenerator));
262 }
263
264 void SpeculativeJIT::runSlowPathGenerators()
265 {
266     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
267         m_slowPathGenerators[i]->generate(this);
268 }
269
270 // On Windows we need to wrap fmod; on other platforms we can call it directly.
271 // On ARMv7 we assert that all function pointers have the low bit set (they point to Thumb code).
272 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
273 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
274 {
275     return fmod(x, y);
276 }
277 #else
278 #define fmodAsDFGOperation fmod
279 #endif
280
281 void SpeculativeJIT::clearGenerationInfo()
282 {
283     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
284         m_generationInfo[i] = GenerationInfo();
285     m_gprs = RegisterBank<GPRInfo>();
286     m_fprs = RegisterBank<FPRInfo>();
287 }
288
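// Silent spill/fill plans describe how to save a live register around a call and restore it
// afterwards without emitting variable events (hence "silent"): constants are rematerialized on
// fill, already-spilled values are reloaded, and Int52 values are reshifted as necessary.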
289 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
290 {
291     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
292     Node* node = info.node();
293     DataFormat registerFormat = info.registerFormat();
294     ASSERT(registerFormat != DataFormatNone);
295     ASSERT(registerFormat != DataFormatDouble);
296         
297     SilentSpillAction spillAction;
298     SilentFillAction fillAction;
299         
300     if (!info.needsSpill())
301         spillAction = DoNothingForSpill;
302     else {
303 #if USE(JSVALUE64)
304         ASSERT(info.gpr() == source);
305         if (registerFormat == DataFormatInt32)
306             spillAction = Store32Payload;
307         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
308             spillAction = StorePtr;
309         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
310             spillAction = Store64;
311         else {
312             ASSERT(registerFormat & DataFormatJS);
313             spillAction = Store64;
314         }
315 #elif USE(JSVALUE32_64)
316         if (registerFormat & DataFormatJS) {
317             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
318             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
319         } else {
320             ASSERT(info.gpr() == source);
321             spillAction = Store32Payload;
322         }
323 #endif
324     }
325         
326     if (registerFormat == DataFormatInt32) {
327         ASSERT(info.gpr() == source);
328         ASSERT(isJSInt32(info.registerFormat()));
329         if (node->hasConstant()) {
330             ASSERT(node->isInt32Constant());
331             fillAction = SetInt32Constant;
332         } else
333             fillAction = Load32Payload;
334     } else if (registerFormat == DataFormatBoolean) {
335 #if USE(JSVALUE64)
336         RELEASE_ASSERT_NOT_REACHED();
337 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
338         fillAction = DoNothingForFill;
339 #endif
340 #elif USE(JSVALUE32_64)
341         ASSERT(info.gpr() == source);
342         if (node->hasConstant()) {
343             ASSERT(node->isBooleanConstant());
344             fillAction = SetBooleanConstant;
345         } else
346             fillAction = Load32Payload;
347 #endif
348     } else if (registerFormat == DataFormatCell) {
349         ASSERT(info.gpr() == source);
350         if (node->hasConstant()) {
351             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
352             node->asCell(); // To get the assertion.
353             fillAction = SetCellConstant;
354         } else {
355 #if USE(JSVALUE64)
356             fillAction = LoadPtr;
357 #else
358             fillAction = Load32Payload;
359 #endif
360         }
361     } else if (registerFormat == DataFormatStorage) {
362         ASSERT(info.gpr() == source);
363         fillAction = LoadPtr;
364     } else if (registerFormat == DataFormatInt52) {
365         if (node->hasConstant())
366             fillAction = SetInt52Constant;
367         else if (info.spillFormat() == DataFormatInt52)
368             fillAction = Load64;
369         else if (info.spillFormat() == DataFormatStrictInt52)
370             fillAction = Load64ShiftInt52Left;
371         else if (info.spillFormat() == DataFormatNone)
372             fillAction = Load64;
373         else {
374             RELEASE_ASSERT_NOT_REACHED();
375 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
376             fillAction = Load64; // Make GCC happy.
377 #endif
378         }
379     } else if (registerFormat == DataFormatStrictInt52) {
380         if (node->hasConstant())
381             fillAction = SetStrictInt52Constant;
382         else if (info.spillFormat() == DataFormatInt52)
383             fillAction = Load64ShiftInt52Right;
384         else if (info.spillFormat() == DataFormatStrictInt52)
385             fillAction = Load64;
386         else if (info.spillFormat() == DataFormatNone)
387             fillAction = Load64;
388         else {
389             RELEASE_ASSERT_NOT_REACHED();
390 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
391             fillAction = Load64; // Make GCC happy.
392 #endif
393         }
394     } else {
395         ASSERT(registerFormat & DataFormatJS);
396 #if USE(JSVALUE64)
397         ASSERT(info.gpr() == source);
398         if (node->hasConstant()) {
399             if (node->isCellConstant())
400                 fillAction = SetTrustedJSConstant;
401             else
402                 fillAction = SetJSConstant;
403         } else if (info.spillFormat() == DataFormatInt32) {
404             ASSERT(registerFormat == DataFormatJSInt32);
405             fillAction = Load32PayloadBoxInt;
406         } else
407             fillAction = Load64;
408 #else
409         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
410         if (node->hasConstant())
411             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
412         else if (info.payloadGPR() == source)
413             fillAction = Load32Payload;
414         else { // Fill the Tag
415             switch (info.spillFormat()) {
416             case DataFormatInt32:
417                 ASSERT(registerFormat == DataFormatJSInt32);
418                 fillAction = SetInt32Tag;
419                 break;
420             case DataFormatCell:
421                 ASSERT(registerFormat == DataFormatJSCell);
422                 fillAction = SetCellTag;
423                 break;
424             case DataFormatBoolean:
425                 ASSERT(registerFormat == DataFormatJSBoolean);
426                 fillAction = SetBooleanTag;
427                 break;
428             default:
429                 fillAction = Load32Tag;
430                 break;
431             }
432         }
433 #endif
434     }
435         
436     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
437 }
438     
439 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
440 {
441     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
442     Node* node = info.node();
443     ASSERT(info.registerFormat() == DataFormatDouble);
444
445     SilentSpillAction spillAction;
446     SilentFillAction fillAction;
447         
448     if (!info.needsSpill())
449         spillAction = DoNothingForSpill;
450     else {
451         ASSERT(!node->hasConstant());
452         ASSERT(info.spillFormat() == DataFormatNone);
453         ASSERT(info.fpr() == source);
454         spillAction = StoreDouble;
455     }
456         
457 #if USE(JSVALUE64)
458     if (node->hasConstant()) {
459         node->asNumber(); // To get the assertion.
460         fillAction = SetDoubleConstant;
461     } else {
462         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
463         fillAction = LoadDouble;
464     }
465 #elif USE(JSVALUE32_64)
466     ASSERT(info.registerFormat() == DataFormatDouble);
467     if (node->hasConstant()) {
468         node->asNumber(); // To get the assertion.
469         fillAction = SetDoubleConstant;
470     } else
471         fillAction = LoadDouble;
472 #endif
473
474     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
475 }
476     
477 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
478 {
479     switch (plan.spillAction()) {
480     case DoNothingForSpill:
481         break;
482     case Store32Tag:
483         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
484         break;
485     case Store32Payload:
486         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
487         break;
488     case StorePtr:
489         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
490         break;
491 #if USE(JSVALUE64)
492     case Store64:
493         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
494         break;
495 #endif
496     case StoreDouble:
497         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
498         break;
499     default:
500         RELEASE_ASSERT_NOT_REACHED();
501     }
502 }
503     
504 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
505 {
506 #if USE(JSVALUE32_64)
507     UNUSED_PARAM(canTrample);
508 #endif
509     switch (plan.fillAction()) {
510     case DoNothingForFill:
511         break;
512     case SetInt32Constant:
513         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
514         break;
515 #if USE(JSVALUE64)
516     case SetInt52Constant:
517         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
518         break;
519     case SetStrictInt52Constant:
520         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
521         break;
522 #endif // USE(JSVALUE64)
523     case SetBooleanConstant:
524         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
525         break;
526     case SetCellConstant:
527         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
528         break;
529 #if USE(JSVALUE64)
530     case SetTrustedJSConstant:
531         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
532         break;
533     case SetJSConstant:
534         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
535         break;
536     case SetDoubleConstant:
537         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
538         m_jit.move64ToDouble(canTrample, plan.fpr());
539         break;
540     case Load32PayloadBoxInt:
541         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
542         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
543         break;
544     case Load32PayloadConvertToInt52:
545         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
546         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
547         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
548         break;
549     case Load32PayloadSignExtend:
550         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
551         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
552         break;
553 #else
554     case SetJSConstantTag:
555         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
556         break;
557     case SetJSConstantPayload:
558         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
559         break;
560     case SetInt32Tag:
561         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
562         break;
563     case SetCellTag:
564         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
565         break;
566     case SetBooleanTag:
567         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
568         break;
569     case SetDoubleConstant:
570         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
571         break;
572 #endif
573     case Load32Tag:
574         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
575         break;
576     case Load32Payload:
577         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
578         break;
579     case LoadPtr:
580         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
581         break;
582 #if USE(JSVALUE64)
583     case Load64:
584         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
585         break;
586     case Load64ShiftInt52Right:
587         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
588         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
589         break;
590     case Load64ShiftInt52Left:
591         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
592         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
593         break;
594 #endif
595     case LoadDouble:
596         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
597         break;
598     default:
599         RELEASE_ASSERT_NOT_REACHED();
600     }
601 }
602     
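// jumpSlowForUnwantedArrayMode: tempGPR holds the cell's indexing type byte. Returns the jump(s)
// taken when that indexing type is incompatible with the speculated ArrayMode; callers turn these
// into speculation checks (checkArray) or conversion slow paths (arrayify).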
603 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
604 {
605     switch (arrayMode.arrayClass()) {
606     case Array::OriginalArray: {
607         CRASH();
608 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
609         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
610         return result;
611 #endif
612     }
613         
614     case Array::Array:
615         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
616         return m_jit.branch32(
617             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
618         
619     case Array::NonArray:
620     case Array::OriginalNonArray:
621         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
622         return m_jit.branch32(
623             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
624         
625     case Array::PossiblyArray:
626         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
627         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
628     }
629     
630     RELEASE_ASSERT_NOT_REACHED();
631     return JITCompiler::Jump();
632 }
633
634 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
635 {
636     JITCompiler::JumpList result;
637     
638     switch (arrayMode.type()) {
639     case Array::Int32:
640         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
641
642     case Array::Double:
643         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
644
645     case Array::Contiguous:
646         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
647
648     case Array::ArrayStorage:
649     case Array::SlowPutArrayStorage: {
650         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
651         
652         if (arrayMode.isJSArray()) {
653             if (arrayMode.isSlowPut()) {
654                 result.append(
655                     m_jit.branchTest32(
656                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
657                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
658                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
659                 result.append(
660                     m_jit.branch32(
661                         MacroAssembler::Above, tempGPR,
662                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
663                 break;
664             }
665             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
666             result.append(
667                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
668             break;
669         }
670         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
671         if (arrayMode.isSlowPut()) {
672             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
673             result.append(
674                 m_jit.branch32(
675                     MacroAssembler::Above, tempGPR,
676                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
677             break;
678         }
679         result.append(
680             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
681         break;
682     }
683     default:
684         CRASH();
685         break;
686     }
687     
688     return result;
689 }
690
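// checkArray: speculate that the base cell already has the indexing shape (or, for Arguments and
// typed arrays, the cell type) required by the node's ArrayMode, exiting with BadIndexingType or
// BadType otherwise. No conversion is done here; that is arrayify()'s job.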
691 void SpeculativeJIT::checkArray(Node* node)
692 {
693     ASSERT(node->arrayMode().isSpecific());
694     ASSERT(!node->arrayMode().doesConversion());
695     
696     SpeculateCellOperand base(this, node->child1());
697     GPRReg baseReg = base.gpr();
698     
699     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
700         noResult(m_currentNode);
701         return;
702     }
703     
704     const ClassInfo* expectedClassInfo = 0;
705     
706     switch (node->arrayMode().type()) {
707     case Array::String:
708         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
709         break;
710     case Array::Int32:
711     case Array::Double:
712     case Array::Contiguous:
713     case Array::ArrayStorage:
714     case Array::SlowPutArrayStorage: {
715         GPRTemporary temp(this);
716         GPRReg tempGPR = temp.gpr();
717         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
718         speculationCheck(
719             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
720             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
721         
722         noResult(m_currentNode);
723         return;
724     }
725     case Array::Arguments:
726         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);
727
728         noResult(m_currentNode);
729         return;
730     default:
731         speculateCellTypeWithoutTypeFiltering(
732             node->child1(), baseReg,
733             typeForTypedArrayType(node->arrayMode().typedArrayType()));
734         noResult(m_currentNode);
735         return;
736     }
737     
738     RELEASE_ASSERT(expectedClassInfo);
739     
740     GPRTemporary temp(this);
741     GPRTemporary temp2(this);
742     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
743     speculationCheck(
744         BadType, JSValueSource::unboxedCell(baseReg), node,
745         m_jit.branchPtr(
746             MacroAssembler::NotEqual,
747             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
748             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
749     
750     noResult(m_currentNode);
751 }
752
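// arrayify: if the base object does not already have the wanted indexing shape (or, for
// ArrayifyToStructure, the wanted structure), jump to an ArrayifySlowPathGenerator that converts
// its storage.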
753 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
754 {
755     ASSERT(node->arrayMode().doesConversion());
756     
757     GPRTemporary temp(this);
758     GPRTemporary structure;
759     GPRReg tempGPR = temp.gpr();
760     GPRReg structureGPR = InvalidGPRReg;
761     
762     if (node->op() != ArrayifyToStructure) {
763         GPRTemporary realStructure(this);
764         structure.adopt(realStructure);
765         structureGPR = structure.gpr();
766     }
767         
768     // We can skip all that comes next if we already have array storage.
769     MacroAssembler::JumpList slowPath;
770     
771     if (node->op() == ArrayifyToStructure) {
772         slowPath.append(m_jit.branchWeakStructure(
773             JITCompiler::NotEqual,
774             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
775             node->structure()));
776     } else {
777         m_jit.load8(
778             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
779         
780         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
781     }
782     
783     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
784         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
785     
786     noResult(m_currentNode);
787 }
788
789 void SpeculativeJIT::arrayify(Node* node)
790 {
791     ASSERT(node->arrayMode().isSpecific());
792     
793     SpeculateCellOperand base(this, node->child1());
794     
795     if (!node->child2()) {
796         arrayify(node, base.gpr(), InvalidGPRReg);
797         return;
798     }
799     
800     SpeculateInt32Operand property(this, node->child2());
801     
802     arrayify(node, base.gpr(), property.gpr());
803 }
804
805 GPRReg SpeculativeJIT::fillStorage(Edge edge)
806 {
807     VirtualRegister virtualRegister = edge->virtualRegister();
808     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
809     
810     switch (info.registerFormat()) {
811     case DataFormatNone: {
812         if (info.spillFormat() == DataFormatStorage) {
813             GPRReg gpr = allocate();
814             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
815             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
816             info.fillStorage(*m_stream, gpr);
817             return gpr;
818         }
819         
820         // Must be a cell; fill it as a cell and then return the pointer.
821         return fillSpeculateCell(edge);
822     }
823         
824     case DataFormatStorage: {
825         GPRReg gpr = info.gpr();
826         m_gprs.lock(gpr);
827         return gpr;
828     }
829         
830     default:
831         return fillSpeculateCell(edge);
832     }
833 }
834
835 void SpeculativeJIT::useChildren(Node* node)
836 {
837     if (node->flags() & NodeHasVarArgs) {
838         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
839             if (!!m_jit.graph().m_varArgChildren[childIdx])
840                 use(m_jit.graph().m_varArgChildren[childIdx]);
841         }
842     } else {
843         Edge child1 = node->child1();
844         if (!child1) {
845             ASSERT(!node->child2() && !node->child3());
846             return;
847         }
848         use(child1);
849         
850         Edge child2 = node->child2();
851         if (!child2) {
852             ASSERT(!node->child3());
853             return;
854         }
855         use(child2);
856         
857         Edge child3 = node->child3();
858         if (!child3)
859             return;
860         use(child3);
861     }
862 }
863
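// compileIn: the 'in' operator. When the property name is a constant atomic string we emit a
// patchable jump plus a StructureStubInfo so the check can later be inline-cached
// (operationInOptimize); otherwise we fall back to operationGenericIn.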
864 void SpeculativeJIT::compileIn(Node* node)
865 {
866     SpeculateCellOperand base(this, node->child2());
867     GPRReg baseGPR = base.gpr();
868     
869     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
870         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
871             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
872             
873             GPRTemporary result(this);
874             GPRReg resultGPR = result.gpr();
875
876             use(node->child1());
877             
878             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
879             MacroAssembler::Label done = m_jit.label();
880             
881             auto slowPath = slowPathCall(
882                 jump.m_jump, this, operationInOptimize,
883                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
884                 string->tryGetValueImpl());
885             
886             stubInfo->codeOrigin = node->origin.semantic;
887             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
888             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
889             stubInfo->patch.usedRegisters = usedRegisters();
890             stubInfo->patch.spillMode = NeedToSpill;
891
892             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
893             addSlowPathGenerator(WTF::move(slowPath));
894
895             base.use();
896
897             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
898             return;
899         }
900     }
901
902     JSValueOperand key(this, node->child1());
903     JSValueRegs regs = key.jsValueRegs();
904         
905     GPRFlushedCallResult result(this);
906     GPRReg resultGPR = result.gpr();
907         
908     base.use();
909     key.use();
910         
911     flushRegisters();
912     callOperation(
913         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
914         baseGPR, regs);
915     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
916 }
917
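// nonSpeculativeCompare / nonSpeculativeStrictEq: like compilePeepHoleBranch below, these try to
// fuse the comparison with an immediately following Branch node; if no such branch exists they
// materialize a boolean result instead. The return value says whether fusion happened.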
918 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
919 {
920     unsigned branchIndexInBlock = detectPeepHoleBranch();
921     if (branchIndexInBlock != UINT_MAX) {
922         Node* branchNode = m_block->at(branchIndexInBlock);
923
924         ASSERT(node->adjustedRefCount() == 1);
925         
926         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
927     
928         m_indexInBlock = branchIndexInBlock;
929         m_currentNode = branchNode;
930         
931         return true;
932     }
933     
934     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
935     
936     return false;
937 }
938
939 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
940 {
941     unsigned branchIndexInBlock = detectPeepHoleBranch();
942     if (branchIndexInBlock != UINT_MAX) {
943         Node* branchNode = m_block->at(branchIndexInBlock);
944
945         ASSERT(node->adjustedRefCount() == 1);
946         
947         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
948     
949         m_indexInBlock = branchIndexInBlock;
950         m_currentNode = branchNode;
951         
952         return true;
953     }
954     
955     nonSpeculativeNonPeepholeStrictEq(node, invert);
956     
957     return false;
958 }
959
960 static const char* dataFormatString(DataFormat format)
961 {
962     // These values correspond to the DataFormat enum.
963     const char* strings[] = {
964         "[  ]",
965         "[ i]",
966         "[ d]",
967         "[ c]",
968         "Err!",
969         "Err!",
970         "Err!",
971         "Err!",
972         "[J ]",
973         "[Ji]",
974         "[Jd]",
975         "[Jc]",
976         "Err!",
977         "Err!",
978         "Err!",
979         "Err!",
980     };
981     return strings[format];
982 }
983
984 void SpeculativeJIT::dump(const char* label)
985 {
986     if (label)
987         dataLogF("<%s>\n", label);
988
989     dataLogF("  gprs:\n");
990     m_gprs.dump();
991     dataLogF("  fprs:\n");
992     m_fprs.dump();
993     dataLogF("  VirtualRegisters:\n");
994     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
995         GenerationInfo& info = m_generationInfo[i];
996         if (info.alive())
997             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
998         else
999             dataLogF("    % 3d:[__][__]", i);
1000         if (info.registerFormat() == DataFormatDouble)
1001             dataLogF(":fpr%d\n", info.fpr());
1002         else if (info.registerFormat() != DataFormatNone
1003 #if USE(JSVALUE32_64)
1004             && !(info.registerFormat() & DataFormatJS)
1005 #endif
1006             ) {
1007             ASSERT(info.gpr() != InvalidGPRReg);
1008             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1009         } else
1010             dataLogF("\n");
1011     }
1012     if (label)
1013         dataLogF("</%s>\n", label);
1014 }
1015
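// GPRTemporary, FPRTemporary and JSValueRegsTemporary hand out scratch registers for the duration
// of compiling one node, reusing an operand's register when its value is no longer needed
// (canReuse) rather than allocating a fresh one.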
1016 GPRTemporary::GPRTemporary()
1017     : m_jit(0)
1018     , m_gpr(InvalidGPRReg)
1019 {
1020 }
1021
1022 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1023     : m_jit(jit)
1024     , m_gpr(InvalidGPRReg)
1025 {
1026     m_gpr = m_jit->allocate();
1027 }
1028
1029 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1030     : m_jit(jit)
1031     , m_gpr(InvalidGPRReg)
1032 {
1033     m_gpr = m_jit->allocate(specific);
1034 }
1035
1036 #if USE(JSVALUE32_64)
1037 GPRTemporary::GPRTemporary(
1038     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1039     : m_jit(jit)
1040     , m_gpr(InvalidGPRReg)
1041 {
1042     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1043         m_gpr = m_jit->reuse(op1.gpr(which));
1044     else
1045         m_gpr = m_jit->allocate();
1046 }
1047 #endif // USE(JSVALUE32_64)
1048
1049 JSValueRegsTemporary::JSValueRegsTemporary() { }
1050
1051 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1052 #if USE(JSVALUE64)
1053     : m_gpr(jit)
1054 #else
1055     : m_payloadGPR(jit)
1056     , m_tagGPR(jit)
1057 #endif
1058 {
1059 }
1060
1061 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1062
1063 JSValueRegs JSValueRegsTemporary::regs()
1064 {
1065 #if USE(JSVALUE64)
1066     return JSValueRegs(m_gpr.gpr());
1067 #else
1068     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1069 #endif
1070 }
1071
1072 void GPRTemporary::adopt(GPRTemporary& other)
1073 {
1074     ASSERT(!m_jit);
1075     ASSERT(m_gpr == InvalidGPRReg);
1076     ASSERT(other.m_jit);
1077     ASSERT(other.m_gpr != InvalidGPRReg);
1078     m_jit = other.m_jit;
1079     m_gpr = other.m_gpr;
1080     other.m_jit = 0;
1081     other.m_gpr = InvalidGPRReg;
1082 }
1083
1084 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1085     : m_jit(jit)
1086     , m_fpr(InvalidFPRReg)
1087 {
1088     m_fpr = m_jit->fprAllocate();
1089 }
1090
1091 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1092     : m_jit(jit)
1093     , m_fpr(InvalidFPRReg)
1094 {
1095     if (m_jit->canReuse(op1.node()))
1096         m_fpr = m_jit->reuse(op1.fpr());
1097     else
1098         m_fpr = m_jit->fprAllocate();
1099 }
1100
1101 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1102     : m_jit(jit)
1103     , m_fpr(InvalidFPRReg)
1104 {
1105     if (m_jit->canReuse(op1.node()))
1106         m_fpr = m_jit->reuse(op1.fpr());
1107     else if (m_jit->canReuse(op2.node()))
1108         m_fpr = m_jit->reuse(op2.fpr());
1109     else
1110         m_fpr = m_jit->fprAllocate();
1111 }
1112
1113 #if USE(JSVALUE32_64)
1114 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1115     : m_jit(jit)
1116     , m_fpr(InvalidFPRReg)
1117 {
1118     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1119         m_fpr = m_jit->reuse(op1.fpr());
1120     else
1121         m_fpr = m_jit->fprAllocate();
1122 }
1123 #endif
1124
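// The compilePeepHole*Branch helpers fuse a compare node with the Branch that immediately follows
// it: rather than materializing a boolean, they branch directly on the comparison, swapping
// taken/notTaken and inverting the condition when the taken block is the fall-through successor.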
1125 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1126 {
1127     BasicBlock* taken = branchNode->branchData()->taken.block;
1128     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1129     
1130     SpeculateDoubleOperand op1(this, node->child1());
1131     SpeculateDoubleOperand op2(this, node->child2());
1132     
1133     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1134     jump(notTaken);
1135 }
1136
1137 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1138 {
1139     BasicBlock* taken = branchNode->branchData()->taken.block;
1140     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1141
1142     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1143     
1144     if (taken == nextBlock()) {
1145         condition = MacroAssembler::NotEqual;
1146         BasicBlock* tmp = taken;
1147         taken = notTaken;
1148         notTaken = tmp;
1149     }
1150
1151     SpeculateCellOperand op1(this, node->child1());
1152     SpeculateCellOperand op2(this, node->child2());
1153     
1154     GPRReg op1GPR = op1.gpr();
1155     GPRReg op2GPR = op2.gpr();
1156     
1157     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1158         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1159             speculationCheck(
1160                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), branchNotObject(op1GPR));
1161         }
1162         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1163             speculationCheck(
1164                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), branchNotObject(op2GPR));
1165         }
1166     } else {
1167         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1168             speculationCheck(
1169                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1170                 branchNotObject(op1GPR));
1171         }
1172         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1173             m_jit.branchTest8(
1174                 MacroAssembler::NonZero, 
1175                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1176                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1177
1178         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1179             speculationCheck(
1180                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1181                 branchNotObject(op2GPR));
1182         }
1183         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1184             m_jit.branchTest8(
1185                 MacroAssembler::NonZero, 
1186                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1187                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1188     }
1189
1190     branchPtr(condition, op1GPR, op2GPR, taken);
1191     jump(notTaken);
1192 }
1193
1194 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1195 {
1196     BasicBlock* taken = branchNode->branchData()->taken.block;
1197     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1198
1199     // The branch instruction will branch to the taken block.
1200     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1201     if (taken == nextBlock()) {
1202         condition = JITCompiler::invert(condition);
1203         BasicBlock* tmp = taken;
1204         taken = notTaken;
1205         notTaken = tmp;
1206     }
1207
1208     if (node->child1()->isBooleanConstant()) {
1209         bool imm = node->child1()->asBoolean();
1210         SpeculateBooleanOperand op2(this, node->child2());
1211         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1212     } else if (node->child2()->isBooleanConstant()) {
1213         SpeculateBooleanOperand op1(this, node->child1());
1214         bool imm = node->child2()->asBoolean();
1215         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1216     } else {
1217         SpeculateBooleanOperand op1(this, node->child1());
1218         SpeculateBooleanOperand op2(this, node->child2());
1219         branch32(condition, op1.gpr(), op2.gpr(), taken);
1220     }
1221
1222     jump(notTaken);
1223 }
1224
1225 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1226 {
1227     BasicBlock* taken = branchNode->branchData()->taken.block;
1228     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1229
1230     // The branch instruction will branch to the taken block.
1231     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1232     if (taken == nextBlock()) {
1233         condition = JITCompiler::invert(condition);
1234         BasicBlock* tmp = taken;
1235         taken = notTaken;
1236         notTaken = tmp;
1237     }
1238
1239     if (node->child1()->isInt32Constant()) {
1240         int32_t imm = node->child1()->asInt32();
1241         SpeculateInt32Operand op2(this, node->child2());
1242         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1243     } else if (node->child2()->isInt32Constant()) {
1244         SpeculateInt32Operand op1(this, node->child1());
1245         int32_t imm = node->child2()->asInt32();
1246         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1247     } else {
1248         SpeculateInt32Operand op1(this, node->child1());
1249         SpeculateInt32Operand op2(this, node->child2());
1250         branch32(condition, op1.gpr(), op2.gpr(), taken);
1251     }
1252
1253     jump(notTaken);
1254 }
1255
1256 // Returns true if the compare is fused with a subsequent branch.
1257 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1258 {
1259     // Fused compare & branch.
1260     unsigned branchIndexInBlock = detectPeepHoleBranch();
1261     if (branchIndexInBlock != UINT_MAX) {
1262         Node* branchNode = m_block->at(branchIndexInBlock);
1263
1264         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1265         // so there can be no intervening nodes that also reference the compare.
1266         ASSERT(node->adjustedRefCount() == 1);
1267
1268         if (node->isBinaryUseKind(Int32Use))
1269             compilePeepHoleInt32Branch(node, branchNode, condition);
1270 #if USE(JSVALUE64)
1271         else if (node->isBinaryUseKind(Int52RepUse))
1272             compilePeepHoleInt52Branch(node, branchNode, condition);
1273 #endif // USE(JSVALUE64)
1274         else if (node->isBinaryUseKind(DoubleRepUse))
1275             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1276         else if (node->op() == CompareEq) {
1277             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1278                 // Use non-peephole comparison, for now.
1279                 return false;
1280             }
1281             if (node->isBinaryUseKind(BooleanUse))
1282                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1283             else if (node->isBinaryUseKind(ObjectUse))
1284                 compilePeepHoleObjectEquality(node, branchNode);
1285             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1286                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1287             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1288                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1289             else {
1290                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1291                 return true;
1292             }
1293         } else {
1294             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1295             return true;
1296         }
1297
1298         use(node->child1());
1299         use(node->child2());
1300         m_indexInBlock = branchIndexInBlock;
1301         m_currentNode = branchNode;
1302         return true;
1303     }
1304     return false;
1305 }
1306
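// noticeOSRBirth: record in the variable event stream that this node's value has come into
// existence, so that OSR exit can locate it if we bail out later in the block.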
1307 void SpeculativeJIT::noticeOSRBirth(Node* node)
1308 {
1309     if (!node->hasVirtualRegister())
1310         return;
1311     
1312     VirtualRegister virtualRegister = node->virtualRegister();
1313     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1314     
1315     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1316 }
1317
1318 void SpeculativeJIT::compileMovHint(Node* node)
1319 {
1320     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1321     
1322     Node* child = node->child1().node();
1323     noticeOSRBirth(child);
1324     
1325     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1326 }
1327
1328 void SpeculativeJIT::bail(AbortReason reason)
1329 {
1330     if (verboseCompilationEnabled())
1331         dataLog("Bailing compilation.\n");
1332     m_compileOkay = true;
1333     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1334     clearGenerationInfo();
1335 }
1336
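// compileCurrentBlock: the per-block driver. It resets the abstract interpreter state, replays the
// variables live at the block head into the event stream, then generates code node by node,
// bailing (via bail()) if the abstract state becomes invalid or a node fails to compile.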
1337 void SpeculativeJIT::compileCurrentBlock()
1338 {
1339     ASSERT(m_compileOkay);
1340     
1341     if (!m_block)
1342         return;
1343     
1344     ASSERT(m_block->isReachable);
1345     
1346     m_jit.blockHeads()[m_block->index] = m_jit.label();
1347
1348     if (!m_block->intersectionOfCFAHasVisited) {
1349         // Don't generate code for basic blocks that are unreachable according to CFA.
1350         // But to be sure that nobody has generated a jump to this block, drop in a
1351         // breakpoint here.
1352         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1353         return;
1354     }
1355
1356     m_stream->appendAndLog(VariableEvent::reset());
1357     
1358     m_jit.jitAssertHasValidCallFrame();
1359     m_jit.jitAssertTagsInPlace();
1360     m_jit.jitAssertArgumentCountSane();
1361
1362     m_state.reset();
1363     m_state.beginBasicBlock(m_block);
1364     
1365     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1366         int operand = m_block->variablesAtHead.operandForIndex(i);
1367         Node* node = m_block->variablesAtHead[i];
1368         if (!node)
1369             continue; // No need to record dead SetLocal's.
1370         
1371         VariableAccessData* variable = node->variableAccessData();
1372         DataFormat format;
1373         if (!node->refCount())
1374             continue; // No need to record dead SetLocal's.
1375         format = dataFormatFor(variable->flushFormat());
1376         m_stream->appendAndLog(
1377             VariableEvent::setLocal(
1378                 VirtualRegister(operand),
1379                 variable->machineLocal(),
1380                 format));
1381     }
1382     
1383     m_codeOriginForExitTarget = CodeOrigin();
1384     m_codeOriginForExitProfile = CodeOrigin();
1385     
1386     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1387         m_currentNode = m_block->at(m_indexInBlock);
1388         
1389         // We may have hit a contradiction that the CFA was aware of but that the JIT
1390         // didn't cause directly.
1391         if (!m_state.isValid()) {
1392             bail(DFGBailedAtTopOfBlock);
1393             return;
1394         }
1395
1396         if (ASSERT_DISABLED)
1397             m_canExit = true; // Essentially disable the assertions.
1398         else
1399             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1400         
1401         m_interpreter.startExecuting();
1402         m_jit.setForNode(m_currentNode);
1403         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1404         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1405         m_lastGeneratedNode = m_currentNode->op();
1406         if (!m_currentNode->shouldGenerate()) {
1407             switch (m_currentNode->op()) {
1408             case JSConstant:
1409                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1410                 break;
1411                 
1412             case SetLocal:
1413                 RELEASE_ASSERT_NOT_REACHED();
1414                 break;
1415                 
1416             case MovHint:
1417                 compileMovHint(m_currentNode);
1418                 break;
1419                 
1420             case ZombieHint: {
1421                 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1422                 break;
1423             }
1424
1425             default:
1426                 if (belongsInMinifiedGraph(m_currentNode->op()))
1427                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1428                 break;
1429             }
1430         } else {
1431             
1432             if (verboseCompilationEnabled()) {
1433                 dataLogF(
1434                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1435                     (int)m_currentNode->index(),
1436                     m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1437                 dataLog("\n");
1438             }
1439             
1440             compile(m_currentNode);
1441
1442 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1443             m_jit.clearRegisterAllocationOffsets();
1444 #endif
1445
1446             if (!m_compileOkay) {
1447                 bail(DFGBailedAtEndOfNode);
1448                 return;
1449             }
1450             
1451             if (belongsInMinifiedGraph(m_currentNode->op())) {
1452                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1453                 noticeOSRBirth(m_currentNode);
1454             }
1455         }
1456         
1457         // Make sure that the abstract state is rematerialized for the next node.
1458         m_interpreter.executeEffects(m_indexInBlock);
1459     }
1460     
1461     // Perform the most basic verification that children have been used correctly.
1462     if (!ASSERT_DISABLED) {
1463         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1464             GenerationInfo& info = m_generationInfo[index];
1465             RELEASE_ASSERT(!info.alive());
1466         }
1467     }
1468 }
1469
1470 // If we are making type predictions about our arguments then
1471 // we need to check that they are correct on function entry.
1472 void SpeculativeJIT::checkArgumentTypes()
1473 {
1474     ASSERT(!m_currentNode);
1475     m_isCheckingArgumentTypes = true;
1476     m_codeOriginForExitTarget = CodeOrigin(0);
1477     m_codeOriginForExitProfile = CodeOrigin(0);
1478
1479     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1480         Node* node = m_jit.graph().m_arguments[i];
1481         if (!node) {
1482             // The argument is dead. We don't do any checks for such arguments.
1483             continue;
1484         }
1485         
1486         ASSERT(node->op() == SetArgument);
1487         ASSERT(node->shouldGenerate());
1488
1489         VariableAccessData* variableAccessData = node->variableAccessData();
1490         FlushFormat format = variableAccessData->flushFormat();
1491         
1492         if (format == FlushedJSValue)
1493             continue;
1494         
1495         VirtualRegister virtualRegister = variableAccessData->local();
1496
1497         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1498         
1499 #if USE(JSVALUE64)
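             // These checks rely on the 64-bit JSValue encoding: a boxed int32 is
             // TagTypeNumber | value, so anything numerically below tagTypeNumberRegister
             // cannot be an int32; booleans are ValueFalse or ValueTrue, which differ only
             // in the low bit, so xoring with ValueFalse must leave nothing but bit 0; and
             // cells have none of the tag-mask bits set.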
1500         switch (format) {
1501         case FlushedInt32: {
1502             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1503             break;
1504         }
1505         case FlushedBoolean: {
1506             GPRTemporary temp(this);
1507             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1508             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1509             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1510             break;
1511         }
1512         case FlushedCell: {
1513             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1514             break;
1515         }
1516         default:
1517             RELEASE_ASSERT_NOT_REACHED();
1518             break;
1519         }
1520 #else
1521         switch (format) {
1522         case FlushedInt32: {
1523             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1524             break;
1525         }
1526         case FlushedBoolean: {
1527             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1528             break;
1529         }
1530         case FlushedCell: {
1531             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1532             break;
1533         }
1534         default:
1535             RELEASE_ASSERT_NOT_REACHED();
1536             break;
1537         }
1538 #endif
1539     }
1540     m_isCheckingArgumentTypes = false;
1541 }
1542
1543 bool SpeculativeJIT::compile()
1544 {
1545     checkArgumentTypes();
1546     
1547     ASSERT(!m_currentNode);
1548     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1549         m_jit.setForBlockIndex(blockIndex);
1550         m_block = m_jit.graph().block(blockIndex);
1551         compileCurrentBlock();
1552     }
1553     linkBranches();
1554     return true;
1555 }
1556
1557 void SpeculativeJIT::createOSREntries()
1558 {
1559     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1560         BasicBlock* block = m_jit.graph().block(blockIndex);
1561         if (!block)
1562             continue;
1563         if (!block->isOSRTarget)
1564             continue;
1565         
1566         // Currently we don't have OSR entry trampolines. We could add them
1567         // here if need be.
1568         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1569     }
1570 }
1571
1572 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1573 {
1574     unsigned osrEntryIndex = 0;
1575     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1576         BasicBlock* block = m_jit.graph().block(blockIndex);
1577         if (!block)
1578             continue;
1579         if (!block->isOSRTarget)
1580             continue;
1581         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1582     }
1583     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1584 }
1585
1586 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1587 {
1588     Edge child3 = m_jit.graph().varArgChild(node, 2);
1589     Edge child4 = m_jit.graph().varArgChild(node, 3);
1590
1591     ArrayMode arrayMode = node->arrayMode();
1592     
1593     GPRReg baseReg = base.gpr();
1594     GPRReg propertyReg = property.gpr();
1595     
1596     SpeculateDoubleOperand value(this, child3);
1597
1598     FPRReg valueReg = value.fpr();
1599     
1600     DFG_TYPE_CHECK(
1601         JSValueRegs(), child3, SpecFullRealNumber,
1602         m_jit.branchDouble(
1603             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1604     
1605     if (!m_compileOkay)
1606         return;
1607     
1608     StorageOperand storage(this, child4);
1609     GPRReg storageReg = storage.gpr();
1610
1611     if (node->op() == PutByValAlias) {
1612         // Store the value to the array.
1613         GPRReg propertyReg = property.gpr();
1614         FPRReg valueReg = value.fpr();
1615         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1616         
1617         noResult(m_currentNode);
1618         return;
1619     }
1620     
1621     GPRTemporary temporary;
1622     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1623
1624     MacroAssembler::Jump slowCase;
1625     
1626     if (arrayMode.isInBounds()) {
1627         speculationCheck(
1628             OutOfBounds, JSValueRegs(), 0,
1629             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1630     } else {
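             // The store may be past the public length. If the index is below the public
             // length we are in bounds. If it is at or beyond the vector length we take the
             // slow path (or OSR exit when this array mode does not allow out-of-bounds
             // stores). Otherwise the slot is within the vector's slack, so grow the public
             // length to index + 1 before storing.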
1631         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1632         
1633         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1634         
1635         if (!arrayMode.isOutOfBounds())
1636             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1637         
1638         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1639         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1640         
1641         inBounds.link(&m_jit);
1642     }
1643     
1644     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1645
1646     base.use();
1647     property.use();
1648     value.use();
1649     storage.use();
1650     
1651     if (arrayMode.isOutOfBounds()) {
1652         addSlowPathGenerator(
1653             slowPathCall(
1654                 slowCase, this,
1655                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1656                 NoResult, baseReg, propertyReg, valueReg));
1657     }
1658
1659     noResult(m_currentNode, UseChildrenCalledExplicitly);
1660 }
1661
1662 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1663 {
1664     SpeculateCellOperand string(this, node->child1());
1665     SpeculateStrictInt32Operand index(this, node->child2());
1666     StorageOperand storage(this, node->child3());
1667
1668     GPRReg stringReg = string.gpr();
1669     GPRReg indexReg = index.gpr();
1670     GPRReg storageReg = storage.gpr();
1671     
1672     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1673
1674     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1675     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1676
1677     GPRTemporary scratch(this);
1678     GPRReg scratchReg = scratch.gpr();
1679
1680     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1681
1682     // Load the character into scratchReg
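         // StringImpl holds either 8-bit (Latin-1) or 16-bit (UTF-16 code unit) characters;
         // the is8Bit flag tested below selects the load width.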
1683     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1684
1685     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1686     JITCompiler::Jump cont8Bit = m_jit.jump();
1687
1688     is16Bit.link(&m_jit);
1689
1690     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1691
1692     cont8Bit.link(&m_jit);
1693
1694     int32Result(scratchReg, m_currentNode);
1695 }
1696
1697 void SpeculativeJIT::compileGetByValOnString(Node* node)
1698 {
1699     SpeculateCellOperand base(this, node->child1());
1700     SpeculateStrictInt32Operand property(this, node->child2());
1701     StorageOperand storage(this, node->child3());
1702     GPRReg baseReg = base.gpr();
1703     GPRReg propertyReg = property.gpr();
1704     GPRReg storageReg = storage.gpr();
1705
1706     GPRTemporary scratch(this);
1707     GPRReg scratchReg = scratch.gpr();
1708 #if USE(JSVALUE32_64)
1709     GPRTemporary resultTag;
1710     GPRReg resultTagReg = InvalidGPRReg;
1711     if (node->arrayMode().isOutOfBounds()) {
1712         GPRTemporary realResultTag(this);
1713         resultTag.adopt(realResultTag);
1714         resultTagReg = resultTag.gpr();
1715     }
1716 #endif
1717
1718     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1719
1720     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1721     JITCompiler::Jump outOfBounds = m_jit.branch32(
1722         MacroAssembler::AboveOrEqual, propertyReg,
1723         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1724     if (node->arrayMode().isInBounds())
1725         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1726
1727     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1728
1729     // Load the character into scratchReg
1730     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1731
1732     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1733     JITCompiler::Jump cont8Bit = m_jit.jump();
1734
1735     is16Bit.link(&m_jit);
1736
1737     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1738
1739     JITCompiler::Jump bigCharacter =
1740         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1741
1742     // 8-bit strings can't contain characters >= 0x100, so the 8-bit path skips the bigCharacter check above.
1743     cont8Bit.link(&m_jit);
1744
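         // Map the character code to an entry in the VM's single-character string table:
         // shift it into a pointer-sized byte offset, add the table base, and load the
         // cached JSString*.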
1745     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1746     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1747     m_jit.loadPtr(scratchReg, scratchReg);
1748
1749     addSlowPathGenerator(
1750         slowPathCall(
1751             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1752
1753     if (node->arrayMode().isOutOfBounds()) {
1754 #if USE(JSVALUE32_64)
1755         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1756 #endif
1757
1758         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1759         if (globalObject->stringPrototypeChainIsSane()) {
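                 // A sane String prototype chain presumably means nothing on it can
                 // intercept indexed accesses, so an out-of-bounds get is known to yield
                 // undefined and the slow path below can produce it directly instead of
                 // performing a generic get.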
1760 #if USE(JSVALUE64)
1761             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1762                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1763 #else
1764             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1765                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1766                 baseReg, propertyReg));
1767 #endif
1768         } else {
1769 #if USE(JSVALUE64)
1770             addSlowPathGenerator(
1771                 slowPathCall(
1772                     outOfBounds, this, operationGetByValStringInt,
1773                     scratchReg, baseReg, propertyReg));
1774 #else
1775             addSlowPathGenerator(
1776                 slowPathCall(
1777                     outOfBounds, this, operationGetByValStringInt,
1778                     resultTagReg, scratchReg, baseReg, propertyReg));
1779 #endif
1780         }
1781         
1782 #if USE(JSVALUE64)
1783         jsValueResult(scratchReg, m_currentNode);
1784 #else
1785         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1786 #endif
1787     } else
1788         cellResult(scratchReg, m_currentNode);
1789 }
1790
1791 void SpeculativeJIT::compileFromCharCode(Node* node)
1792 {
1793     SpeculateStrictInt32Operand property(this, node->child1());
1794     GPRReg propertyReg = property.gpr();
1795     GPRTemporary smallStrings(this);
1796     GPRTemporary scratch(this);
1797     GPRReg scratchReg = scratch.gpr();
1798     GPRReg smallStringsReg = smallStrings.gpr();
1799
1800     JITCompiler::JumpList slowCases;
1801     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1802     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1803     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1804
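         // A null table entry (presumably one that has not been materialized yet) also
         // takes the operationStringFromCharCode slow path.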
1805     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1806     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1807     cellResult(scratchReg, m_currentNode);
1808 }
1809
1810 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1811 {
1812     VirtualRegister virtualRegister = node->virtualRegister();
1813     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1814
1815     switch (info.registerFormat()) {
1816     case DataFormatStorage:
1817         RELEASE_ASSERT_NOT_REACHED();
1818
1819     case DataFormatBoolean:
1820     case DataFormatCell:
1821         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1822         return GeneratedOperandTypeUnknown;
1823
1824     case DataFormatNone:
1825     case DataFormatJSCell:
1826     case DataFormatJS:
1827     case DataFormatJSBoolean:
1828     case DataFormatJSDouble:
1829         return GeneratedOperandJSValue;
1830
1831     case DataFormatJSInt32:
1832     case DataFormatInt32:
1833         return GeneratedOperandInteger;
1834
1835     default:
1836         RELEASE_ASSERT_NOT_REACHED();
1837         return GeneratedOperandTypeUnknown;
1838     }
1839 }
1840
1841 void SpeculativeJIT::compileValueToInt32(Node* node)
1842 {
1843     switch (node->child1().useKind()) {
1844 #if USE(JSVALUE64)
1845     case Int52RepUse: {
1846         SpeculateStrictInt52Operand op1(this, node->child1());
1847         GPRTemporary result(this, Reuse, op1);
1848         GPRReg op1GPR = op1.gpr();
1849         GPRReg resultGPR = result.gpr();
1850         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1851         int32Result(resultGPR, node, DataFormatInt32);
1852         return;
1853     }
1854 #endif // USE(JSVALUE64)
1855         
1856     case DoubleRepUse: {
1857         GPRTemporary result(this);
1858         SpeculateDoubleOperand op1(this, node->child1());
1859         FPRReg fpr = op1.fpr();
1860         GPRReg gpr = result.gpr();
1861         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1862         
1863         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1864         
1865         int32Result(gpr, node);
1866         return;
1867     }
1868     
1869     case NumberUse:
1870     case NotCellUse: {
1871         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1872         case GeneratedOperandInteger: {
1873             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1874             GPRTemporary result(this, Reuse, op1);
1875             m_jit.move(op1.gpr(), result.gpr());
1876             int32Result(result.gpr(), node, op1.format());
1877             return;
1878         }
1879         case GeneratedOperandJSValue: {
1880             GPRTemporary result(this);
1881 #if USE(JSVALUE64)
1882             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1883
1884             GPRReg gpr = op1.gpr();
1885             GPRReg resultGpr = result.gpr();
1886             FPRTemporary tempFpr(this);
1887             FPRReg fpr = tempFpr.fpr();
1888
1889             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1890             JITCompiler::JumpList converted;
1891
1892             if (node->child1().useKind() == NumberUse) {
1893                 DFG_TYPE_CHECK(
1894                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1895                     m_jit.branchTest64(
1896                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1897             } else {
1898                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1899                 
1900                 DFG_TYPE_CHECK(
1901                     JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
1902                 
1903                 // It's not a cell: true turns into 1 and everything else (undefined, null, false) turns into 0, matching ToInt32.
1904                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1905                 converted.append(m_jit.jump());
1906                 
1907                 isNumber.link(&m_jit);
1908             }
1909
1910             // If we get here, the value is a double encoded as a JSValue; unbox it and convert it with the toInt32 helper.
1911             m_jit.move(gpr, resultGpr);
1912             unboxDouble(resultGpr, fpr);
1913
1914             silentSpillAllRegisters(resultGpr);
1915             callOperation(toInt32, resultGpr, fpr);
1916             silentFillAllRegisters(resultGpr);
1917
1918             converted.append(m_jit.jump());
1919
1920             isInteger.link(&m_jit);
1921             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1922
1923             converted.link(&m_jit);
1924 #else
1925             Node* childNode = node->child1().node();
1926             VirtualRegister virtualRegister = childNode->virtualRegister();
1927             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1928
1929             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1930
1931             GPRReg payloadGPR = op1.payloadGPR();
1932             GPRReg resultGpr = result.gpr();
1933         
1934             JITCompiler::JumpList converted;
1935
1936             if (info.registerFormat() == DataFormatJSInt32)
1937                 m_jit.move(payloadGPR, resultGpr);
1938             else {
1939                 GPRReg tagGPR = op1.tagGPR();
1940                 FPRTemporary tempFpr(this);
1941                 FPRReg fpr = tempFpr.fpr();
1942                 FPRTemporary scratch(this);
1943
1944                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1945
1946                 if (node->child1().useKind() == NumberUse) {
1947                     DFG_TYPE_CHECK(
1948                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1949                         m_jit.branch32(
1950                             MacroAssembler::AboveOrEqual, tagGPR,
1951                             TrustedImm32(JSValue::LowestTag)));
1952                 } else {
1953                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1954                     
1955                     DFG_TYPE_CHECK(
1956                         op1.jsValueRegs(), node->child1(), ~SpecCell,
1957                         branchIsCell(op1.jsValueRegs()));
1958                     
1959                     // It's not a cell: booleans convert to their payload (true is 1, false is 0) and everything else (undefined, null) turns into 0, matching ToInt32.
1960                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1961                     m_jit.move(TrustedImm32(0), resultGpr);
1962                     converted.append(m_jit.jump());
1963                     
1964                     isBoolean.link(&m_jit);
1965                     m_jit.move(payloadGPR, resultGpr);
1966                     converted.append(m_jit.jump());
1967                     
1968                     isNumber.link(&m_jit);
1969                 }
1970
1971                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1972
1973                 silentSpillAllRegisters(resultGpr);
1974                 callOperation(toInt32, resultGpr, fpr);
1975                 silentFillAllRegisters(resultGpr);
1976
1977                 converted.append(m_jit.jump());
1978
1979                 isInteger.link(&m_jit);
1980                 m_jit.move(payloadGPR, resultGpr);
1981
1982                 converted.link(&m_jit);
1983             }
1984 #endif
1985             int32Result(resultGpr, node);
1986             return;
1987         }
1988         case GeneratedOperandTypeUnknown:
1989             RELEASE_ASSERT(!m_compileOkay);
1990             return;
1991         }
1992         RELEASE_ASSERT_NOT_REACHED();
1993         return;
1994     }
1995     
1996     default:
1997         ASSERT(!m_compileOkay);
1998         return;
1999     }
2000 }
2001
2002 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2003 {
2004     if (doesOverflow(node->arithMode())) {
2005         // We know that this sometimes produces doubles. So produce a double every
2006         // time. This at least allows subsequent code to not have weird conditionals.
2007             
2008         SpeculateInt32Operand op1(this, node->child1());
2009         FPRTemporary result(this);
2010             
2011         GPRReg inputGPR = op1.gpr();
2012         FPRReg outputFPR = result.fpr();
2013             
2014         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2015             
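             // The GPR holds a uint32 bit pattern, but convertInt32ToDouble treats it as
             // signed, so values with the top bit set come out negative; adding 2^32 in
             // that case recovers the intended unsigned value.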
2016         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2017         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2018         positive.link(&m_jit);
2019             
2020         doubleResult(outputFPR, node);
2021         return;
2022     }
2023     
2024     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2025
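         // In CheckOverflow mode the value is expected to already fit in int32, so
         // speculate that the top bit is clear and OSR exit via the check below otherwise.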
2026     SpeculateInt32Operand op1(this, node->child1());
2027     GPRTemporary result(this);
2028
2029     m_jit.move(op1.gpr(), result.gpr());
2030
2031     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2032
2033     int32Result(result.gpr(), node, op1.format());
2034 }
2035
2036 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2037 {
2038     SpeculateDoubleOperand op1(this, node->child1());
2039     FPRTemporary scratch(this);
2040     GPRTemporary result(this);
2041     
2042     FPRReg valueFPR = op1.fpr();
2043     FPRReg scratchFPR = scratch.fpr();
2044     GPRReg resultGPR = result.gpr();
2045
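         // Convert, OSR exiting if the double is not exactly representable as an int32 (or
         // is negative zero when that needs to be distinguished).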
2046     JITCompiler::JumpList failureCases;
2047     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2048     m_jit.branchConvertDoubleToInt32(
2049         valueFPR, resultGPR, failureCases, scratchFPR,
2050         shouldCheckNegativeZero(node->arithMode()));
2051     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2052
2053     int32Result(resultGPR, node);
2054 }
2055
2056 void SpeculativeJIT::compileDoubleRep(Node* node)
2057 {
2058     switch (node->child1().useKind()) {
2059     case NumberUse: {
2060         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2061     
2062         if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2063             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2064             FPRTemporary result(this);
2065             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2066             doubleResult(result.fpr(), node);
2067             return;
2068         }
2069     
2070         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2071         FPRTemporary result(this);
2072     
2073 #if USE(JSVALUE64)
2074         GPRTemporary temp(this);
2075
2076         GPRReg op1GPR = op1.gpr();
2077         GPRReg tempGPR = temp.gpr();
2078         FPRReg resultFPR = result.fpr();
2079     
2080         JITCompiler::Jump isInteger = m_jit.branch64(
2081             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2082     
2083         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2084             typeCheck(
2085                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2086                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2087         }
2088     
2089         m_jit.move(op1GPR, tempGPR);
2090         unboxDouble(tempGPR, resultFPR);
2091         JITCompiler::Jump done = m_jit.jump();
2092     
2093         isInteger.link(&m_jit);
2094         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2095         done.link(&m_jit);
2096 #else // USE(JSVALUE64) -> this is the 32_64 case
2097         FPRTemporary temp(this);
2098     
2099         GPRReg op1TagGPR = op1.tagGPR();
2100         GPRReg op1PayloadGPR = op1.payloadGPR();
2101         FPRReg tempFPR = temp.fpr();
2102         FPRReg resultFPR = result.fpr();
2103     
2104         JITCompiler::Jump isInteger = m_jit.branch32(
2105             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2106     
2107         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2108             typeCheck(
2109                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2110                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2111         }
2112     
2113         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2114         JITCompiler::Jump done = m_jit.jump();
2115     
2116         isInteger.link(&m_jit);
2117         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2118         done.link(&m_jit);
2119 #endif // USE(JSVALUE64)
2120     
2121         doubleResult(resultFPR, node);
2122         return;
2123     }
2124         
2125 #if USE(JSVALUE64)
2126     case Int52RepUse: {
2127         SpeculateStrictInt52Operand value(this, node->child1());
2128         FPRTemporary result(this);
2129         
2130         GPRReg valueGPR = value.gpr();
2131         FPRReg resultFPR = result.fpr();
2132
2133         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2134         
2135         doubleResult(resultFPR, node);
2136         return;
2137     }
2138 #endif // USE(JSVALUE64)
2139         
2140     default:
2141         RELEASE_ASSERT_NOT_REACHED();
2142         return;
2143     }
2144 }
2145
2146 void SpeculativeJIT::compileValueRep(Node* node)
2147 {
2148     switch (node->child1().useKind()) {
2149     case DoubleRepUse: {
2150         SpeculateDoubleOperand value(this, node->child1());
2151         JSValueRegsTemporary result(this);
2152         
2153         FPRReg valueFPR = value.fpr();
2154         JSValueRegs resultRegs = result.regs();
2155         
2156         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2157         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2158         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2159         // local was purified.
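             // Instead, canonicalize the NaN in the FPR right before boxing: an impure NaN
             // is one whose bit pattern could be misread as a tagged value, so it must be
             // turned into the canonical NaN before it is boxed.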
2160         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2161             m_jit.purifyNaN(valueFPR);
2162
2163         boxDouble(valueFPR, resultRegs);
2164         
2165         jsValueResult(resultRegs, node);
2166         return;
2167     }
2168         
2169 #if USE(JSVALUE64)
2170     case Int52RepUse: {
2171         SpeculateStrictInt52Operand value(this, node->child1());
2172         GPRTemporary result(this);
2173         
2174         GPRReg valueGPR = value.gpr();
2175         GPRReg resultGPR = result.gpr();
2176         
2177         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2178         
2179         jsValueResult(resultGPR, node);
2180         return;
2181     }
2182 #endif // USE(JSVALUE64)
2183         
2184     default:
2185         RELEASE_ASSERT_NOT_REACHED();
2186         return;
2187     }
2188 }
2189
2190 static double clampDoubleToByte(double d)
2191 {
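         // Adding 0.5 up front implements round-half-up once the caller truncates; the
         // !(d > 0) form of the lower-bound test also catches NaN, since any comparison
         // with NaN is false.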
2192     d += 0.5;
2193     if (!(d > 0))
2194         d = 0;
2195     else if (d > 255)
2196         d = 255;
2197     return d;
2198 }
2199
2200 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2201 {
2202     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2203     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2204     jit.xorPtr(result, result);
2205     MacroAssembler::Jump clamped = jit.jump();
2206     tooBig.link(&jit);
2207     jit.move(JITCompiler::TrustedImm32(255), result);
2208     clamped.link(&jit);
2209     inBounds.link(&jit);
2210 }
2211
2212 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2213 {
2214     // Unordered compare so we pick up NaN
2215     static const double zero = 0;
2216     static const double byteMax = 255;
2217     static const double half = 0.5;
2218     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2219     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2220     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2221     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2222     
2223     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2224     // FIXME: This should probably just use a floating point round!
2225     // https://bugs.webkit.org/show_bug.cgi?id=72054
2226     jit.addDouble(source, scratch);
2227     jit.truncateDoubleToInt32(scratch, result);   
2228     MacroAssembler::Jump truncatedInt = jit.jump();
2229     
2230     tooSmall.link(&jit);
2231     jit.xorPtr(result, result);
2232     MacroAssembler::Jump zeroed = jit.jump();
2233     
2234     tooBig.link(&jit);
2235     jit.move(JITCompiler::TrustedImm32(255), result);
2236     
2237     truncatedInt.link(&jit);
2238     zeroed.link(&jit);
2239
2240 }
2241
2242 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2243 {
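         // Returns an unset Jump when no dynamic bounds check is needed: PutByValAlias
         // means an earlier access in this block already checked the index, and a constant
         // index below a known view's length is provably in bounds.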
2244     if (node->op() == PutByValAlias)
2245         return JITCompiler::Jump();
2246     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2247         uint32_t length = view->length();
2248         Node* indexNode = m_jit.graph().child(node, 1).node();
2249         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2250             return JITCompiler::Jump();
2251         return m_jit.branch32(
2252             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2253     }
2254     return m_jit.branch32(
2255         MacroAssembler::AboveOrEqual, indexGPR,
2256         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2257 }
2258
2259 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2260 {
2261     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2262     if (!jump.isSet())
2263         return;
2264     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2265 }
2266
2267 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2268 {
2269     ASSERT(isInt(type));
2270     
2271     SpeculateCellOperand base(this, node->child1());
2272     SpeculateStrictInt32Operand property(this, node->child2());
2273     StorageOperand storage(this, node->child3());
2274
2275     GPRReg baseReg = base.gpr();
2276     GPRReg propertyReg = property.gpr();
2277     GPRReg storageReg = storage.gpr();
2278
2279     GPRTemporary result(this);
2280     GPRReg resultReg = result.gpr();
2281
2282     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2283
2284     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2285     switch (elementSize(type)) {
2286     case 1:
2287         if (isSigned(type))
2288             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2289         else
2290             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2291         break;
2292     case 2:
2293         if (isSigned(type))
2294             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2295         else
2296             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2297         break;
2298     case 4:
2299         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2300         break;
2301     default:
2302         CRASH();
2303     }
2304     if (elementSize(type) < 4 || isSigned(type)) {
2305         int32Result(resultReg, node);
2306         return;
2307     }
2308     
2309     ASSERT(elementSize(type) == 4 && !isSigned(type));
2310     if (node->shouldSpeculateInt32()) {
2311         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2312         int32Result(resultReg, node);
2313         return;
2314     }
2315     
2316 #if USE(JSVALUE64)
2317     if (node->shouldSpeculateMachineInt()) {
2318         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2319         strictInt52Result(resultReg, node);
2320         return;
2321     }
2322 #endif
2323     
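         // Remaining case: an unsigned 32-bit load that may not fit in int32. Produce a
         // double, applying the same signed-conversion fix-up as compileUInt32ToNumber when
         // the top bit is set.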
2324     FPRTemporary fresult(this);
2325     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2326     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2327     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2328     positive.link(&m_jit);
2329     doubleResult(fresult.fpr(), node);
2330 }
2331
2332 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2333 {
2334     ASSERT(isInt(type));
2335     
2336     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2337     GPRReg storageReg = storage.gpr();
2338     
2339     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2340     
2341     GPRTemporary value;
2342     GPRReg valueGPR = InvalidGPRReg;
2343     
2344     if (valueUse->isConstant()) {
2345         JSValue jsValue = valueUse->asJSValue();
2346         if (!jsValue.isNumber()) {
2347             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2348             noResult(node);
2349             return;
2350         }
2351         double d = jsValue.asNumber();
2352         if (isClamped(type)) {
2353             ASSERT(elementSize(type) == 1);
2354             d = clampDoubleToByte(d);
2355         }
2356         GPRTemporary scratch(this);
2357         GPRReg scratchReg = scratch.gpr();
2358         m_jit.move(Imm32(toInt32(d)), scratchReg);
2359         value.adopt(scratch);
2360         valueGPR = scratchReg;
2361     } else {
2362         switch (valueUse.useKind()) {
2363         case Int32Use: {
2364             SpeculateInt32Operand valueOp(this, valueUse);
2365             GPRTemporary scratch(this);
2366             GPRReg scratchReg = scratch.gpr();
2367             m_jit.move(valueOp.gpr(), scratchReg);
2368             if (isClamped(type)) {
2369                 ASSERT(elementSize(type) == 1);
2370                 compileClampIntegerToByte(m_jit, scratchReg);
2371             }
2372             value.adopt(scratch);
2373             valueGPR = scratchReg;
2374             break;
2375         }
2376             
2377 #if USE(JSVALUE64)
2378         case Int52RepUse: {
2379             SpeculateStrictInt52Operand valueOp(this, valueUse);
2380             GPRTemporary scratch(this);
2381             GPRReg scratchReg = scratch.gpr();
2382             m_jit.move(valueOp.gpr(), scratchReg);
2383             if (isClamped(type)) {
2384                 ASSERT(elementSize(type) == 1);
2385                 MacroAssembler::Jump inBounds = m_jit.branch64(
2386                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2387                 MacroAssembler::Jump tooBig = m_jit.branch64(
2388                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2389                 m_jit.move(TrustedImm32(0), scratchReg);
2390                 MacroAssembler::Jump clamped = m_jit.jump();
2391                 tooBig.link(&m_jit);
2392                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2393                 clamped.link(&m_jit);
2394                 inBounds.link(&m_jit);
2395             }
2396             value.adopt(scratch);
2397             valueGPR = scratchReg;
2398             break;
2399         }
2400 #endif // USE(JSVALUE64)
2401             
2402         case DoubleRepUse: {
2403             if (isClamped(type)) {
2404                 ASSERT(elementSize(type) == 1);
2405                 SpeculateDoubleOperand valueOp(this, valueUse);
2406                 GPRTemporary result(this);
2407                 FPRTemporary floatScratch(this);
2408                 FPRReg fpr = valueOp.fpr();
2409                 GPRReg gpr = result.gpr();
2410                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2411                 value.adopt(result);
2412                 valueGPR = gpr;
2413             } else {
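                         // Non-clamped integer typed arrays use ToInt32-style conversion:
                         // NaN stores 0, otherwise truncate, with the toInt32 helper
                         // covering values the inline truncation cannot handle.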
2414                 SpeculateDoubleOperand valueOp(this, valueUse);
2415                 GPRTemporary result(this);
2416                 FPRReg fpr = valueOp.fpr();
2417                 GPRReg gpr = result.gpr();
2418                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2419                 m_jit.xorPtr(gpr, gpr);
2420                 MacroAssembler::Jump fixed = m_jit.jump();
2421                 notNaN.link(&m_jit);
2422                 
2423                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2424                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2425                 
2426                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2427                 
2428                 fixed.link(&m_jit);
2429                 value.adopt(result);
2430                 valueGPR = gpr;
2431             }
2432             break;
2433         }
2434             
2435         default:
2436             RELEASE_ASSERT_NOT_REACHED();
2437             break;
2438         }
2439     }
2440     
2441     ASSERT_UNUSED(valueGPR, valueGPR != property);
2442     ASSERT(valueGPR != base);
2443     ASSERT(valueGPR != storageReg);
2444     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2445     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2446         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2447         outOfBounds = MacroAssembler::Jump();
2448     }
2449
2450     switch (elementSize(type)) {
2451     case 1:
2452         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2453         break;
2454     case 2:
2455         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2456         break;
2457     case 4:
2458         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2459         break;
2460     default:
2461         CRASH();
2462     }
2463     if (outOfBounds.isSet())
2464         outOfBounds.link(&m_jit);
2465     noResult(node);
2466 }
2467
2468 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2469 {
2470     ASSERT(isFloat(type));
2471     
2472     SpeculateCellOperand base(this, node->child1());
2473     SpeculateStrictInt32Operand property(this, node->child2());
2474     StorageOperand storage(this, node->child3());
2475
2476     GPRReg baseReg = base.gpr();
2477     GPRReg propertyReg = property.gpr();
2478     GPRReg storageReg = storage.gpr();
2479
2480     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2481
2482     FPRTemporary result(this);
2483     FPRReg resultReg = result.fpr();
2484     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2485     switch (elementSize(type)) {
2486     case 4:
2487         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2488         m_jit.convertFloatToDouble(resultReg, resultReg);
2489         break;
2490     case 8: {
2491         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2492         break;
2493     }
2494     default:
2495         RELEASE_ASSERT_NOT_REACHED();
2496     }
2497     
2498     doubleResult(resultReg, node);
2499 }
2500
2501 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2502 {
2503     ASSERT(isFloat(type));
2504     
2505     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2506     GPRReg storageReg = storage.gpr();
2507     
2508     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2509     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2510
2511     SpeculateDoubleOperand valueOp(this, valueUse);
2512     FPRTemporary scratch(this);
2513     FPRReg valueFPR = valueOp.fpr();
2514     FPRReg scratchFPR = scratch.fpr();
2515
2516     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2517     
2518     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2519     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2520         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2521         outOfBounds = MacroAssembler::Jump();
2522     }
2523     
2524     switch (elementSize(type)) {
2525     case 4: {
2526         m_jit.moveDouble(valueFPR, scratchFPR);
2527         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2528         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2529         break;
2530     }
2531     case 8:
2532         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2533         break;
2534     default:
2535         RELEASE_ASSERT_NOT_REACHED();
2536     }
2537     if (outOfBounds.isSet())
2538         outOfBounds.link(&m_jit);
2539     noResult(node);
2540 }
2541
2542 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2543 {
2544     // Check that prototype is an object.
2545     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
2546     
2547     // Initialize scratchReg with the value being checked.
2548     m_jit.move(valueReg, scratchReg);
2549     
2550     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2551     MacroAssembler::Label loop(&m_jit);
2552     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2553     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2554     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2555 #if USE(JSVALUE64)
2556     branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2557 #else
2558     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2559 #endif
2560     
2561     // No match - result is false.
2562 #if USE(JSVALUE64)
2563     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2564 #else
2565     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2566 #endif
2567     MacroAssembler::Jump putResult = m_jit.jump();
2568     
2569     isInstance.link(&m_jit);
2570 #if USE(JSVALUE64)
2571     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2572 #else
2573     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2574 #endif
2575     
2576     putResult.link(&m_jit);
2577 }
2578
2579 void SpeculativeJIT::compileInstanceOf(Node* node)
2580 {
2581     if (node->child1().useKind() == UntypedUse) {
2582         // It might not be a cell. Speculate less aggressively.
2583         // Or: it might only be used once (i.e. by us), so we get zero benefit
2584         // from speculating any more aggressively than we absolutely need to.
2585         
2586         JSValueOperand value(this, node->child1());
2587         SpeculateCellOperand prototype(this, node->child2());
2588         GPRTemporary scratch(this);
2589         GPRTemporary scratch2(this);
2590         
2591         GPRReg prototypeReg = prototype.gpr();
2592         GPRReg scratchReg = scratch.gpr();
2593         GPRReg scratch2Reg = scratch2.gpr();
2594         
2595         MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
2596         GPRReg valueReg = value.jsValueRegs().payloadGPR();
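             // Fall-through means the value is not a cell; a primitive is never an instance
             // of anything, so the result is false.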
2597         moveFalseTo(scratchReg);
2598
2599         MacroAssembler::Jump done = m_jit.jump();
2600         
2601         isCell.link(&m_jit);
2602         
2603         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2604         
2605         done.link(&m_jit);
2606
2607         blessedBooleanResult(scratchReg, node);
2608         return;
2609     }
2610     
2611     SpeculateCellOperand value(this, node->child1());
2612     SpeculateCellOperand prototype(this, node->child2());
2613     
2614     GPRTemporary scratch(this);
2615     GPRTemporary scratch2(this);
2616     
2617     GPRReg valueReg = value.gpr();
2618     GPRReg prototypeReg = prototype.gpr();
2619     GPRReg scratchReg = scratch.gpr();
2620     GPRReg scratch2Reg = scratch2.gpr();
2621     
2622     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2623
2624     blessedBooleanResult(scratchReg, node);
2625 }
2626
2627 void SpeculativeJIT::compileAdd(Node* node)
2628 {
2629     switch (node->binaryUseKind()) {
2630     case Int32Use: {
2631         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2632         
2633         if (node->child1()->isInt32Constant()) {
2634             int32_t imm1 = node->child1()->asInt32();
2635             SpeculateInt32Operand op2(this, node->child2());
2636             GPRTemporary result(this);
2637
2638             if (!shouldCheckOverflow(node->arithMode())) {
2639                 m_jit.move(op2.gpr(), result.gpr());
2640                 m_jit.add32(Imm32(imm1), result.gpr());
2641             } else
2642                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2643
2644             int32Result(result.gpr(), node);
2645             return;
2646         }
2647         
2648         if (node->child2()->isInt32Constant()) {
2649             SpeculateInt32Operand op1(this, node->child1());
2650             int32_t imm2 = node->child2()->asInt32();
2651             GPRTemporary result(this);
2652                 
2653             if (!shouldCheckOverflow(node->arithMode())) {
2654                 m_jit.move(op1.gpr(), result.gpr());
2655                 m_jit.add32(Imm32(imm2), result.gpr());
2656             } else
2657                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2658
2659             int32Result(result.gpr(), node);
2660             return;
2661         }
2662                 
2663         SpeculateInt32Operand op1(this, node->child1());
2664         SpeculateInt32Operand op2(this, node->child2());
2665         GPRTemporary result(this, Reuse, op1, op2);
2666
2667         GPRReg gpr1 = op1.gpr();
2668         GPRReg gpr2 = op2.gpr();
2669         GPRReg gprResult = result.gpr();
2670
2671         if (!shouldCheckOverflow(node->arithMode())) {
2672             if (gpr1 == gprResult)
2673                 m_jit.add32(gpr2, gprResult);
2674             else {
2675                 m_jit.move(gpr2, gprResult);
2676                 m_jit.add32(gpr1, gprResult);
2677             }
2678         } else {
2679             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2680                 
2681             if (gpr1 == gprResult)
2682                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2683             else if (gpr2 == gprResult)
2684                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2685             else
2686                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2687         }
2688
2689         int32Result(gprResult, node);
2690         return;
2691     }
2692         
2693 #if USE(JSVALUE64)
2694     case Int52RepUse: {
2695         ASSERT(shouldCheckOverflow(node->arithMode()));
2696         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2697
2698         // Will we need an overflow check? If we can prove that neither input can be
2699         // Int52 then the overflow check will not be necessary.
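             // (Here SpecInt52 covers machine ints outside the int32 range; if both inputs
             // are int32-sized, their sum cannot overflow 52 bits.)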
2700         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2701             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2702             SpeculateWhicheverInt52Operand op1(this, node->child1());
2703             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2704             GPRTemporary result(this, Reuse, op1);
2705             m_jit.move(op1.gpr(), result.gpr());
2706             m_jit.add64(op2.gpr(), result.gpr());
2707             int52Result(result.gpr(), node, op1.format());
2708             return;
2709         }
2710         
2711         SpeculateInt52Operand op1(this, node->child1());
2712         SpeculateInt52Operand op2(this, node->child2());
2713         GPRTemporary result(this);
2714         m_jit.move(op1.gpr(), result.gpr());
2715         speculationCheck(
2716             Int52Overflow, JSValueRegs(), 0,
2717             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2718         int52Result(result.gpr(), node);
2719         return;
2720     }
2721 #endif // USE(JSVALUE64)
2722     
2723     case DoubleRepUse: {
2724         SpeculateDoubleOperand op1(this, node->child1());
2725         SpeculateDoubleOperand op2(this, node->child2());
2726         FPRTemporary result(this, op1, op2);
2727
2728         FPRReg reg1 = op1.fpr();
2729         FPRReg reg2 = op2.fpr();
2730         m_jit.addDouble(reg1, reg2, result.fpr());
2731
2732         doubleResult(result.fpr(), node);
2733         return;
2734     }
2735         
2736     default:
2737         RELEASE_ASSERT_NOT_REACHED();
2738         break;
2739     }
2740 }
2741
2742 void SpeculativeJIT::compileMakeRope(Node* node)
2743 {
2744     ASSERT(node->child1().useKind() == KnownStringUse);
2745     ASSERT(node->child2().useKind() == KnownStringUse);
2746     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2747     
2748     SpeculateCellOperand op1(this, node->child1());
2749     SpeculateCellOperand op2(this, node->child2());
2750     SpeculateCellOperand op3(this, node->child3());
2751     GPRTemporary result(this);
2752     GPRTemporary allocator(this);
2753     GPRTemporary scratch(this);
2754     
2755     GPRReg opGPRs[3];
2756     unsigned numOpGPRs;
2757     opGPRs[0] = op1.gpr();
2758     opGPRs[1] = op2.gpr();
2759     if (node->child3()) {
2760         opGPRs[2] = op3.gpr();
2761         numOpGPRs = 3;
2762     } else {
2763         opGPRs[2] = InvalidGPRReg;
2764         numOpGPRs = 2;
2765     }
2766     GPRReg resultGPR = result.gpr();
2767     GPRReg allocatorGPR = allocator.gpr();
2768     GPRReg scratchGPR = scratch.gpr();
2769     
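         // Inline-allocate the JSRopeString and fill it in: the fibers are stored as
         // pointers (no characters are copied), the rope's Is8Bit flag is the AND of the
         // fibers' flags, and the length is the sum of the fiber lengths with an overflow
         // check that OSR exits. Note that allocatorGPR is reused as the length accumulator
         // once the allocation itself is done.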
2770     JITCompiler::JumpList slowPath;
2771     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2772     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2773     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2774         
2775     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2776     for (unsigned i = 0; i < numOpGPRs; ++i)
2777         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2778     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2779         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2780     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2781     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2782     if (!ASSERT_DISABLED) {
2783         JITCompiler::Jump ok = m_jit.branch32(
2784             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2785         m_jit.abortWithReason(DFGNegativeStringLength);
2786         ok.link(&m_jit);
2787     }
2788     for (unsigned i = 1; i < numOpGPRs; ++i) {
2789         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2790         speculationCheck(
2791             Uncountable, JSValueSource(), nullptr,
2792             m_jit.branchAdd32(
2793                 JITCompiler::Overflow,
2794                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2795     }
2796     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2797     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2798     if (!ASSERT_DISABLED) {
2799         JITCompiler::Jump ok = m_jit.branch32(
2800             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2801         m_jit.abortWithReason(DFGNegativeStringLength);
2802         ok.link(&m_jit);
2803     }
2804     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2805     
2806     switch (numOpGPRs) {
2807     case 2:
2808         addSlowPathGenerator(slowPathCall(
2809             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2810         break;
2811     case 3:
2812         addSlowPathGenerator(slowPathCall(
2813             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2814         break;
2815     default:
2816         RELEASE_ASSERT_NOT_REACHED();
2817         break;
2818     }
2819         
2820     cellResult(resultGPR, node);
2821 }
2822
2823 void SpeculativeJIT::compileArithSub(Node* node)
2824 {
2825     switch (node->binaryUseKind()) {
2826     case Int32Use: {
2827         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2828         
2829         if (node->child2()->isInt32Constant()) {
2830             SpeculateInt32Operand op1(this, node->child1());
2831             int32_t imm2 = node->child2()->asInt32();
2832             GPRTemporary result(this);
2833
2834             if (!shouldCheckOverflow(node->arithMode())) {
2835                 m_jit.move(op1.gpr(), result.gpr());
2836                 m_jit.sub32(Imm32(imm2), result.gpr());
2837             } else {
2838                 GPRTemporary scratch(this);
2839                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2840             }
2841
2842             int32Result(result.gpr(), node);
2843             return;
2844         }
2845             
2846         if (node->child1()->isInt32Constant()) {
2847             int32_t imm1 = node->child1()->asInt32();
2848             SpeculateInt32Operand op2(this, node->child2());
2849             GPRTemporary result(this);
2850                 
2851             m_jit.move(Imm32(imm1), result.gpr());
2852             if (!shouldCheckOverflow(node->arithMode()))
2853                 m_jit.sub32(op2.gpr(), result.gpr());
2854             else
2855                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2856                 
2857             int32Result(result.gpr(), node);
2858             return;
2859         }
2860             
2861         SpeculateInt32Operand op1(this, node->child1());
2862         SpeculateInt32Operand op2(this, node->child2());
2863         GPRTemporary result(this);
2864
2865         if (!shouldCheckOverflow(node->arithMode())) {
2866             m_jit.move(op1.gpr(), result.gpr());
2867             m_jit.sub32(op2.gpr(), result.gpr());
2868         } else
2869             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2870
2871         int32Result(result.gpr(), node);
2872         return;
2873     }
2874         
2875 #if USE(JSVALUE64)
2876     case Int52RepUse: {
2877         ASSERT(shouldCheckOverflow(node->arithMode()));
2878         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2879
2880         // Will we need an overflow check? If we can prove that neither input can be
2881         // Int52 then the overflow check will not be necessary.
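             // (Here SpecInt52 covers only values outside the int32 range; if both inputs are
             // int32-sized, their difference fits comfortably within 52 bits.)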
2882         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2883             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2884             SpeculateWhicheverInt52Operand op1(this, node->child1());
2885             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2886             GPRTemporary result(this, Reuse, op1);
2887             m_jit.move(op1.gpr(), result.gpr());
2888             m_jit.sub64(op2.gpr(), result.gpr());
2889             int52Result(result.gpr(), node, op1.format());
2890             return;
2891         }
2892         
2893         SpeculateInt52Operand op1(this, node->child1());
2894         SpeculateInt52Operand op2(this, node->child2());
2895         GPRTemporary result(this);
2896         m_jit.move(op1.gpr(), result.gpr());
2897         speculationCheck(
2898             Int52Overflow, JSValueRegs(), 0,
2899             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2900         int52Result(result.gpr(), node);
2901         return;
2902     }
2903 #endif // USE(JSVALUE64)
2904
2905     case DoubleRepUse: {
2906         SpeculateDoubleOperand op1(this, node->child1());
2907         SpeculateDoubleOperand op2(this, node->child2());
2908         FPRTemporary result(this, op1);
2909
2910         FPRReg reg1 = op1.fpr();
2911         FPRReg reg2 = op2.fpr();
2912         m_jit.subDouble(reg1, reg2, result.fpr());
2913
2914         doubleResult(result.fpr(), node);
2915         return;
2916     }
2917         
2918     default:
2919         RELEASE_ASSERT_NOT_REACHED();
2920         return;
2921     }
2922 }
2923
2924 void SpeculativeJIT::compileArithNegate(Node* node)
2925 {
2926     switch (node->child1().useKind()) {
2927     case Int32Use: {
2928         SpeculateInt32Operand op1(this, node->child1());
2929         GPRTemporary result(this);
2930
2931         m_jit.move(op1.gpr(), result.gpr());
2932
2933         // Note: there is no arithmetic mode that skips the overflow check but still
2934         // cares about negative zero, so the unchecked case below can ignore negative zero.
2935         
2936         if (!shouldCheckOverflow(node->arithMode()))
2937             m_jit.neg32(result.gpr());
2938         else if (!shouldCheckNegativeZero(node->arithMode()))
2939             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
2940         else {
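                 // A single test covers both hazards: (value & 0x7fffffff) == 0 exactly when the
                 // value is 0 (negation would produce -0) or -2^31 (negation would overflow).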
2941             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2942             m_jit.neg32(result.gpr());
2943         }
2944
2945         int32Result(result.gpr(), node);
2946         return;
2947     }
2948
2949 #if USE(JSVALUE64)
2950     case Int52RepUse: {
2951         ASSERT(shouldCheckOverflow(node->arithMode()));
2952         
2953         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2954             SpeculateWhicheverInt52Operand op1(this, node->child1());
2955             GPRTemporary result(this);
2956             GPRReg op1GPR = op1.gpr();
2957             GPRReg resultGPR = result.gpr();
2958             m_jit.move(op1GPR, resultGPR);
2959             m_jit.neg64(resultGPR);
2960             if (shouldCheckNegativeZero(node->arithMode())) {
2961                 speculationCheck(
2962                     NegativeZero, JSValueRegs(), 0,
2963                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2964             }
2965             int52Result(resultGPR, node, op1.format());
2966             return;
2967         }
2968         
2969         SpeculateInt52Operand op1(this, node->child1());
2970         GPRTemporary result(this);
2971         GPRReg op1GPR = op1.gpr();
2972         GPRReg resultGPR = result.gpr();
2973         m_jit.move(op1GPR, resultGPR);
2974         speculationCheck(
2975             Int52Overflow, JSValueRegs(), 0,
2976             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2977         if (shouldCheckNegativeZero(node->arithMode())) {
2978             speculationCheck(
2979                 NegativeZero, JSValueRegs(), 0,
2980                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2981         }
2982         int52Result(resultGPR, node);
2983         return;
2984     }
2985 #endif // USE(JSVALUE64)
2986         
2987     case DoubleRepUse: {
2988         SpeculateDoubleOperand op1(this, node->child1());
2989         FPRTemporary result(this);
2990         
2991         m_jit.negateDouble(op1.fpr(), result.fpr());
2992         
2993         doubleResult(result.fpr(), node);
2994         return;
2995     }
2996         
2997     default:
2998         RELEASE_ASSERT_NOT_REACHED();
2999         return;
3000     }
3001 }
3002 void SpeculativeJIT::compileArithMul(Node* node)
3003 {
3004     switch (node->binaryUseKind()) {
3005     case Int32Use: {
3006         SpeculateInt32Operand op1(this, node->child1());
3007         SpeculateInt32Operand op2(this, node->child2());
3008         GPRTemporary result(this);
3009
3010         GPRReg reg1 = op1.gpr();
3011         GPRReg reg2 = op2.gpr();
3012
3013         // We can perform truncated multiplications if we get to this point, because if the
3014         // fixup phase could not prove that it would be safe, it would have turned us into
3015         // a double multiplication.
3016         if (!shouldCheckOverflow(node->arithMode())) {
3017             m_jit.move(reg1, result.gpr());
3018             m_jit.mul32(reg2, result.gpr());
3019         } else {
3020             speculationCheck(
3021                 Overflow, JSValueRegs(), 0,
3022                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3023         }
3024             
3025         // Check for negative zero, if the users of this node care about such things.
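             // An int32 product of zero stands for -0 whenever either operand was negative.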
3026         if (shouldCheckNegativeZero(node->arithMode())) {
3027             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3028             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3029             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3030             resultNonZero.link(&m_jit);
3031         }
3032
3033         int32Result(result.gpr(), node);
3034         return;
3035     }
3036     
3037 #if USE(JSVALUE64)   
3038     case Int52RepUse: {
3039         ASSERT(shouldCheckOverflow(node->arithMode()));
3040         
3041         // This is super clever. We want to do an int52 multiplication and check the
3042         // int52 overflow bit. There is no direct hardware support for this, but we do
3043         // have the ability to do an int64 multiplication and check the int64 overflow
3044         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3045         // registers, with the high 12 bits being sign-extended. We can do:
3046         //
3047         //     (a * (b << 12))
3048         //
3049         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3050         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3051         // multiplication overflows is identical to whether the 'a * b' 52-bit
3052         // multiplication overflows.
3053         //
3054         // In our nomenclature, this is:
3055         //
3056         //     strictInt52(a) * int52(b) => int52
3057         //
3058         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3059         // bits.
3060         //
3061         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3062         // we just do whatever is more convenient for op1 and have op2 do the
3063         // opposite. This ensures that we do at most one shift.
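             // For example, with a = 3 and b = 5 (stored shifted as 5 << 12), the 64-bit product
             // is (3 * 5) << 12, i.e. 15 already in the shifted int52 format, and the 64-bit
             // overflow flag fires exactly when 3 * 5 would overflow 52 bits.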
3064
3065         SpeculateWhicheverInt52Operand op1(this, node->child1());
3066         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3067         GPRTemporary result(this);
3068         
3069         GPRReg op1GPR = op1.gpr();
3070         GPRReg op2GPR = op2.gpr();
3071         GPRReg resultGPR = result.gpr();
3072         
3073         m_jit.move(op1GPR, resultGPR);
3074         speculationCheck(
3075             Int52Overflow, JSValueRegs(), 0,
3076             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3077         
3078         if (shouldCheckNegativeZero(node->arithMode())) {
3079             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3080                 MacroAssembler::NonZero, resultGPR);
3081             speculationCheck(
3082                 NegativeZero, JSValueRegs(), 0,
3083                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3084             speculationCheck(
3085                 NegativeZero, JSValueRegs(), 0,
3086                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3087             resultNonZero.link(&m_jit);
3088         }
3089         
3090         int52Result(resultGPR, node);
3091         return;
3092     }
3093 #endif // USE(JSVALUE64)
3094         
3095     case DoubleRepUse: {
3096         SpeculateDoubleOperand op1(this, node->child1());
3097         SpeculateDoubleOperand op2(this, node->child2());
3098         FPRTemporary result(this, op1, op2);
3099         
3100         FPRReg reg1 = op1.fpr();
3101         FPRReg reg2 = op2.fpr();
3102         
3103         m_jit.mulDouble(reg1, reg2, result.fpr());
3104         
3105         doubleResult(result.fpr(), node);
3106         return;
3107     }
3108         
3109     default:
3110         RELEASE_ASSERT_NOT_REACHED();
3111         return;
3112     }
3113 }
3114
3115 void SpeculativeJIT::compileArithDiv(Node* node)
3116 {
3117     switch (node->binaryUseKind()) {
3118     case Int32Use: {
3119 #if CPU(X86) || CPU(X86_64)
3120         SpeculateInt32Operand op1(this, node->child1());
3121         SpeculateInt32Operand op2(this, node->child2());
3122         GPRTemporary eax(this, X86Registers::eax);
3123         GPRTemporary edx(this, X86Registers::edx);
3124         GPRReg op1GPR = op1.gpr();
3125         GPRReg op2GPR = op2.gpr();
3126     
3127         GPRReg op2TempGPR;
3128         GPRReg temp;
3129         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3130             op2TempGPR = allocate();
3131             temp = op2TempGPR;
3132         } else {
3133             op2TempGPR = InvalidGPRReg;
3134             if (op1GPR == X86Registers::eax)
3135                 temp = X86Registers::edx;
3136             else
3137                 temp = X86Registers::eax;
3138         }
3139     
3140         ASSERT(temp != op1GPR);
3141         ASSERT(temp != op2GPR);
3142     
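             // A single unsigned comparison detects the two dangerous denominators: op2 + 1 is
             // above 1 (unsigned) unless op2 is -1 (INT_MIN / -1 overflows) or 0.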
3143         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3144     
3145         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3146     
3147         JITCompiler::JumpList done;
3148         if (shouldCheckOverflow(node->arithMode())) {
3149             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3150             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3151         } else {
3152             // This is the case where we convert the result to an int after we're done, and we
3153             // already know that the denominator is either -1 or 0. So, if the denominator is
3154             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3155             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3156             // are happy to fall through to a normal division, since we're just dividing
3157             // something by negative 1.
3158         
3159             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3160             m_jit.move(TrustedImm32(0), eax.gpr());
3161             done.append(m_jit.jump());
3162         
3163             notZero.link(&m_jit);
3164             JITCompiler::Jump notNeg2ToThe31 =
3165                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3166             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3167             done.append(m_jit.jump());
3168         
3169             notNeg2ToThe31.link(&m_jit);
3170         }
3171     
3172         safeDenominator.link(&m_jit);
3173     
3174         // If the user cares about negative zero, then speculate that we're not about
3175         // to produce negative zero.
3176         if (shouldCheckNegativeZero(node->arithMode())) {
3177             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3178             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3179             numeratorNonZero.link(&m_jit);
3180         }
3181     
3182         if (op2TempGPR != InvalidGPRReg) {
3183             m_jit.move(op2GPR, op2TempGPR);
3184             op2GPR = op2TempGPR;
3185         }
3186             
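             // cdq sign-extends eax into edx:eax; idivl then leaves the quotient in eax and the
             // remainder in edx.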
3187         m_jit.move(op1GPR, eax.gpr());
3188         m_jit.assembler().cdq();
3189         m_jit.assembler().idivl_r(op2GPR);
3190             
3191         if (op2TempGPR != InvalidGPRReg)
3192             unlock(op2TempGPR);
3193
3194         // Check that there was no remainder. If there had been, then we'd be obligated to
3195         // produce a double result instead.
3196         if (shouldCheckOverflow(node->arithMode()))
3197             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3198         
3199         done.link(&m_jit);
3200         int32Result(eax.gpr(), node);
3201 #elif CPU(APPLE_ARMV7S) || CPU(ARM64)
3202         SpeculateInt32Operand op1(this, node->child1());
3203         SpeculateInt32Operand op2(this, node->child2());
3204         GPRReg op1GPR = op1.gpr();
3205         GPRReg op2GPR = op2.gpr();
3206         GPRTemporary quotient(this);
3207         GPRTemporary multiplyAnswer(this);
3208
3209         // If the user cares about negative zero, then speculate that we're not about
3210         // to produce negative zero.
3211         if (shouldCheckNegativeZero(node->arithMode())) {
3212             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3213             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3214             numeratorNonZero.link(&m_jit);
3215         }
3216
3217         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3218
3219         // Check that there was no remainder. If there had been, then we'd be obligated to
3220         // produce a double result instead.
3221         if (shouldCheckOverflow(node->arithMode())) {
3222             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3223             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3224         }
3225
3226         int32Result(quotient.gpr(), node);
3227 #else
3228         RELEASE_ASSERT_NOT_REACHED();
3229 #endif
3230         break;
3231     }
3232         
3233     case DoubleRepUse: {
3234         SpeculateDoubleOperand op1(this, node->child1());
3235         SpeculateDoubleOperand op2(this, node->child2());
3236         FPRTemporary result(this, op1);
3237         
3238         FPRReg reg1 = op1.fpr();
3239         FPRReg reg2 = op2.fpr();
3240         m_jit.divDouble(reg1, reg2, result.fpr());
3241         
3242         doubleResult(result.fpr(), node);
3243         break;
3244     }
3245         
3246     default:
3247         RELEASE_ASSERT_NOT_REACHED();
3248         break;
3249     }
3250 }
3251
3252 void SpeculativeJIT::compileArithMod(Node* node)
3253 {
3254     switch (node->binaryUseKind()) {
3255     case Int32Use: {
3256         // In the fast path, the dividend value could be the final result
3257         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3258         SpeculateStrictInt32Operand op1(this, node->child1());
3259         
3260         if (node->child2()->isInt32Constant()) {
3261             int32_t divisor = node->child2()->asInt32();
3262             if (divisor > 1 && hasOneBitSet(divisor)) {
3263                 unsigned logarithm = WTF::fastLog2(divisor);
3264                 GPRReg dividendGPR = op1.gpr();
3265                 GPRTemporary result(this);
3266                 GPRReg resultGPR = result.gpr();
3267
3268                 // This is what LLVM generates. It's pretty crazy. Here's my
3269                 // attempt at understanding it.
3270                 
3271                 // First, compute either divisor - 1, or 0, depending on whether
3272                 // the dividend is negative:
3273                 //
3274                 // If dividend < 0:  resultGPR = divisor - 1
3275                 // If dividend >= 0: resultGPR = 0
3276                 m_jit.move(dividendGPR, resultGPR);
3277                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3278                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3279                 
3280                 // Add in the dividend, so that:
3281                 //
3282                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3283                 // If dividend >= 0: resultGPR = dividend
3284                 m_jit.add32(dividendGPR, resultGPR);
3285                 
3286                 // Mask so as to only get the *high* bits. This rounds down
3287                 // (towards negative infinity) resultGPR to the nearest multiple
3288                 // of divisor, so that:
3289                 //
3290                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3291                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3292                 //
3293                 // Note that this can be simplified to:
3294                 //
3295                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3296                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3297                 //
3298                 // Note that if the dividend is negative, resultGPR will also be negative.
3299                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3300                 // zero, because of how things are conditionalized.
3301                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3302                 
3303                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3304                 //
3305                 // resultGPR = dividendGPR - resultGPR
3306                 m_jit.neg32(resultGPR);
3307                 m_jit.add32(dividendGPR, resultGPR);
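                     // Worked example with divisor = 4 (logarithm = 2): for dividend = -5 the
                     // shifts produce 3, adding the dividend gives -2, masking with -4 gives -4,
                     // and -5 - (-4) = -1, which matches -5 % 4 in JavaScript.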
3308                 
3309                 if (shouldCheckNegativeZero(node->arithMode())) {
3310                     // Check that we're not about to create negative zero.
3311                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3312                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3313                     numeratorPositive.link(&m_jit);
3314                 }
3315
3316                 int32Result(resultGPR, node);
3317                 return;
3318             }
3319         }
3320         
3321 #if CPU(X86) || CPU(X86_64)
3322         if (node->child2()->isInt32Constant()) {
3323             int32_t divisor = node->child2()->asInt32();
3324             if (divisor && divisor != -1) {
3325                 GPRReg op1Gpr = op1.gpr();
3326
3327                 GPRTemporary eax(this, X86Registers::eax);
3328                 GPRTemporary edx(this, X86Registers::edx);
3329                 GPRTemporary scratch(this);
3330                 GPRReg scratchGPR = scratch.gpr();
3331
3332                 GPRReg op1SaveGPR;
3333                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3334                     op1SaveGPR = allocate();
3335                     ASSERT(op1Gpr != op1SaveGPR);
3336                     m_jit.move(op1Gpr, op1SaveGPR);
3337                 } else
3338                     op1SaveGPR = op1Gpr;
3339                 ASSERT(op1SaveGPR != X86Registers::eax);
3340                 ASSERT(op1SaveGPR != X86Registers::edx);
3341
3342                 m_jit.move(op1Gpr, eax.gpr());
3343                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3344                 m_jit.assembler().cdq();
3345                 m_jit.assembler().idivl_r(scratchGPR);
3346                 if (shouldCheckNegativeZero(node->arithMode())) {
3347                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3348                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3349                     numeratorPositive.link(&m_jit);
3350                 }
3351             
3352                 if (op1SaveGPR != op1Gpr)
3353                     unlock(op1SaveGPR);
3354
3355                 int32Result(edx.gpr(), node);
3356                 return;
3357             }
3358         }
3359 #endif
3360
3361         SpeculateInt32Operand op2(this, node->child2());
3362 #if CPU(X86) || CPU(X86_64)
3363         GPRTemporary eax(this, X86Registers::eax);
3364         GPRTemporary edx(this, X86Registers::edx);
3365         GPRReg op1GPR = op1.gpr();
3366         GPRReg op2GPR = op2.gpr();
3367     
3368         GPRReg op2TempGPR;
3369         GPRReg temp;
3370         GPRReg op1SaveGPR;
3371     
3372         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3373             op2TempGPR = allocate();
3374             temp = op2TempGPR;
3375         } else {
3376             op2TempGPR = InvalidGPRReg;
3377             if (op1GPR == X86Registers::eax)
3378                 temp = X86Registers::edx;
3379             else
3380                 temp = X86Registers::eax;
3381         }
3382     
3383         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3384             op1SaveGPR = allocate();
3385             ASSERT(op1GPR != op1SaveGPR);
3386             m_jit.move(op1GPR, op1SaveGPR);
3387         } else
3388             op1SaveGPR = op1GPR;
3389     
3390         ASSERT(temp != op1GPR);
3391         ASSERT(temp != op2GPR);
3392         ASSERT(op1SaveGPR != X86Registers::eax);
3393         ASSERT(op1SaveGPR != X86Registers::edx);
3394     
3395         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3396     
3397         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3398     
3399         JITCompiler::JumpList done;
3400         
3401         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3402         // separate case for that. But it probably doesn't matter so much.
3403         if (shouldCheckOverflow(node->arithMode())) {
3404             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3405             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3406         } else {
3407             // This is the case where we convert the result to an int after we're done, and we
3408             // already know that the denominator is either -1 or 0. So, if the denominator is
3409             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3410             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3411             // happy to fall through to a normal division, since we're just dividing something
3412             // by negative 1.
3413         
3414             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3415             m_jit.move(TrustedImm32(0), edx.gpr());
3416             done.append(m_jit.jump());
3417         
3418             notZero.link(&m_jit);
3419             JITCompiler::Jump notNeg2ToThe31 =
3420                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3421             m_jit.move(TrustedImm32(0), edx.gpr());
3422             done.append(m_jit.jump());
3423         
3424             notNeg2ToThe31.link(&m_jit);
3425         }
3426         
3427         safeDenominator.link(&m_jit);
3428             
3429         if (op2TempGPR != InvalidGPRReg) {
3430             m_jit.move(op2GPR, op2TempGPR);
3431             op2GPR = op2TempGPR;
3432         }
3433             
3434         m_jit.move(op1GPR, eax.gpr());
3435         m_jit.assembler().cdq();
3436         m_jit.assembler().idivl_r(op2GPR);
3437             
3438         if (op2TempGPR != InvalidGPRReg)
3439             unlock(op2TempGPR);
3440
3441         // Check that we're not about to create negative zero.
3442         if (shouldCheckNegativeZero(node->arithMode())) {
3443             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3444             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3445             numeratorPositive.link(&m_jit);
3446         }
3447     
3448         if (op1SaveGPR != op1GPR)
3449             unlock(op1SaveGPR);
3450             
3451         done.link(&m_jit);
3452         int32Result(edx.gpr(), node);
3453
3454 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3455         GPRTemporary temp(this);
3456         GPRTemporary quotientThenRemainder(this);
3457         GPRTemporary multiplyAnswer(this);
3458         GPRReg dividendGPR = op1.gpr();
3459         GPRReg divisorGPR = op2.gpr();
3460         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3461         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3462
3463         JITCompiler::JumpList done;
3464     
3465         if (shouldCheckOverflow(node->arithMode()))
3466             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3467         else {
3468             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3469             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3470             done.append(m_jit.jump());
3471             denominatorNotZero.link(&m_jit);
3472         }
3473
3474         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3475         // FIXME: It seems like there are cases where we don't need this? What if we have
3476         // arithMode() == Arith::Unchecked?
3477         // https://bugs.webkit.org/show_bug.cgi?id=126444
3478         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3479 #if CPU(APPLE_ARMV7S)
3480         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3481 #else
3482         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3483 #endif
3484
3485         // If the user cares about negative zero, then speculate that we're not about
3486         // to produce negative zero.
3487         if (shouldCheckNegativeZero(node->arithMode())) {
3488             // Check that we're not about to create negative zero.
3489             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3490             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3491             numeratorPositive.link(&m_jit);
3492         }
3493
3494         done.link(&m_jit);
3495
3496         int32Result(quotientThenRemainderGPR, node);
3497 #else // not an architecture that can do integer division
3498         RELEASE_ASSERT_NOT_REACHED();
3499 #endif
3500         return;
3501     }
3502         
3503     case DoubleRepUse: {
3504         SpeculateDoubleOperand op1(this, node->child1());
3505         SpeculateDoubleOperand op2(this, node->child2());
3506         
3507         FPRReg op1FPR = op1.fpr();
3508         FPRReg op2FPR = op2.fpr();
3509         
3510         flushRegisters();
3511         
3512         FPRResult result(this);
3513         
3514         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3515         
3516         doubleResult(result.fpr(), node);
3517         return;
3518     }
3519         
3520     default:
3521         RELEASE_ASSERT_NOT_REACHED();
3522         return;
3523     }
3524 }
3525
3526 void SpeculativeJIT::compileArithSqrt(Node* node)
3527 {
3528     SpeculateDoubleOperand op1(this, node->child1());
3529     FPRReg op1FPR = op1.fpr();
3530
3531     if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3532         flushRegisters();
3533         FPRResult result(this);
3534         callOperation(sqrt, result.fpr(), op1FPR);
3535         doubleResult(result.fpr(), node);
3536     } else {
3537         FPRTemporary result(this, op1);
3538         m_jit.sqrtDouble(op1.fpr(), result.fpr());
3539         doubleResult(result.fpr(), node);
3540     }
3541 }
3542
3543 // For small positive integer exponents, it is worth doing a tiny inline loop to exponentiate the base.
3544 // Every register is clobbered by this helper.
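     // The loop is exponentiation by squaring: it walks the exponent's bits from least to most
     // significant, multiplying the result by the current power of the base whenever a bit is set.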
3545 static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
3546 {
3547     MacroAssembler::JumpList skipFastPath;
3548     skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
3549     skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));
3550
3551     static const double oneConstant = 1.0;
3552     assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);
3553
3554     MacroAssembler::Label startLoop(assembler.label());
3555     MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
3556     assembler.mulDouble(xOperand, result);
3557     exponentIsEven.link(&assembler);
3558     assembler.mulDouble(xOperand, xOperand);
3559     assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
3560     assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
3561
3562     MacroAssembler::Jump skipSlowPath = assembler.jump();
3563     skipFastPath.link(&assembler);
3564
3565     return skipSlowPath;
3566 }
3567
3568 void SpeculativeJIT::compileArithPow(Node* node)
3569 {
3570     if (node->child2().useKind() == Int32Use) {
3571         SpeculateDoubleOperand xOperand(this, node->child1());
3572         SpeculateInt32Operand yOperand(this, node->child2());
3573         FPRReg xOperandfpr = xOperand.fpr();
3574         GPRReg yOperandGpr = yOperand.gpr();
3575         FPRTemporary yOperandfpr(this);
3576
3577         flushRegisters();
3578
3579         FPRResult result(this);
3580         FPRReg resultFpr = result.fpr();
3581
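             // The integer fast path clobbers its inputs, so work on copies; the original x and y
             // are still needed if we fall back to operationMathPow below.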
3582         FPRTemporary xOperandCopy(this);
3583         FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3584         m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3585
3586         GPRTemporary counter(this);
3587         GPRReg counterGpr = counter.gpr();
3588         m_jit.move(yOperandGpr, counterGpr);
3589
3590         MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
3591         m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
3592         callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());
3593
3594         skipFallback.link(&m_jit);
3595         doubleResult(resultFpr, node);
3596         return;
3597     }
3598
3599     SpeculateDoubleOperand xOperand(this, node->child1());
3600     SpeculateDoubleOperand yOperand(this, node->child2());
3601     FPRReg xOperandfpr = xOperand.fpr();
3602     FPRReg yOperandfpr = yOperand.fpr();
3603
3604     flushRegisters();
3605
3606     FPRResult result(this);
3607     FPRReg resultFpr = result.fpr();
3608
3609     FPRTemporary xOperandCopy(this);
3610     FPRReg xOperandCopyFpr = xOperandCopy.fpr();
3611
3612     FPRTemporary scratch(this);
3613     FPRReg scratchFpr = scratch.fpr();
3614
3615     GPRTemporary yOperandInteger(this);
3616     GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
3617     MacroAssembler::JumpList failedExponentConversionToInteger;
3618     m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);
3619
3620     m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
3621     MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
3622     failedExponentConversionToInteger.link(&m_jit);
3623
3624     callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
3625     skipFallback.link(&m_jit);
3626     doubleResult(resultFpr, node);
3627 }
3628
3629 // Returns true if the compare is fused with a subsequent branch.
3630 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3631 {
3632     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3633         return true;
3634
3635     if (node->isBinaryUseKind(Int32Use)) {
3636         compileInt32Compare(node, condition);
3637         return false;
3638     }
3639     
3640 #if USE(JSVALUE64)
3641     if (node->isBinaryUseKind(Int52RepUse)) {
3642         compileInt52Compare(node, condition);
3643         return false;
3644     }
3645 #endif // USE(JSVALUE64)
3646     
3647     if (node->isBinaryUseKind(DoubleRepUse)) {
3648         compileDoubleCompare(node, doubleCondition);
3649         return false;
3650     }
3651     
3652     if (node->op() == CompareEq) {
3653         if (node->isBinaryUseKind(StringUse)) {
3654             compileStringEquality(node);
3655             return false;
3656         }
3657         
3658         if (node->isBinaryUseKind(BooleanUse)) {
3659             compileBooleanCompare(node, condition);
3660             return false;
3661         }
3662
3663         if (node->isBinaryUseKind(StringIdentUse)) {
3664             compileStringIdentEquality(node);
3665             return false;
3666         }
3667         
3668         if (node->isBinaryUseKind(ObjectUse)) {
3669             compileObjectEquality(node);
3670             return false;
3671         }
3672         
3673         if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3674             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3675             return false;
3676         }
3677         
3678         if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3679             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3680             return false;
3681         }
3682     }
3683     
3684     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3685     return false;
3686 }
3687
3688 bool SpeculativeJIT::compileStrictEq(Node* node)
3689 {
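         // Several of the cases below first try to fuse the comparison with an immediately
         // following Branch in this block (detectPeepHoleBranch); when fusion succeeds, both
         // children are consumed here, m_indexInBlock/m_currentNode are retargeted to the branch,
         // and we return true to signal that the compare was folded into the branch.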
3690     if (node->isBinaryUseKind(BooleanUse)) {
3691         unsigned branchIndexInBlock = detectPeepHoleBranch();
3692         if (branchIndexInBlock != UINT_MAX) {
3693             Node* branchNode = m_block->at(branchIndexInBlock);
3694             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3695             use(node->child1());
3696             use(node->child2());
3697             m_indexInBlock = branchIndexInBlock;
3698             m_currentNode = branchNode;
3699             return true;
3700         }
3701         compileBooleanCompare(node, MacroAssembler::Equal);
3702         return false;
3703     }
3704
3705     if (node->isBinaryUseKind(Int32Use)) {
3706         unsigned branchIndexInBlock = detectPeepHoleBranch();
3707         if (branchIndexInBlock != UINT_MAX) {
3708             Node* branchNode = m_block->at(branchIndexInBlock);
3709             compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3710             use(node->child1());
3711             use(node->child2());
3712             m_indexInBlock = branchIndexInBlock;
3713             m_currentNode = branchNode;
3714             return true;
3715         }
3716         compileInt32Compare(node, MacroAssembler::Equal);
3717         return false;
3718     }
3719     
3720 #if USE(JSVALUE64)   
3721     if (node->isBinaryUseKind(Int52RepUse)) {
3722         unsigned branchIndexInBlock = detectPeepHoleBranch();
3723         if (branchIndexInBlock != UINT_MAX) {
3724             Node* branchNode = m_block->at(branchIndexInBlock);
3725             compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3726             use(node->child1());
3727             use(node->child2());
3728             m_indexInBlock = branchIndexInBlock;
3729             m_currentNode = branchNode;
3730             return true;
3731         }
3732         compileInt52Compare(node, MacroAssembler::Equal);
3733         return false;
3734     }
3735 #endif // USE(JSVALUE64)
3736
3737     if (node->isBinaryUseKind(DoubleRepUse)) {
3738         unsigned branchIndexInBlock = detectPeepHoleBranch();
3739         if (branchIndexInBlock != UINT_MAX) {
3740             Node* branchNode = m_block->at(branchIndexInBlock);
3741             compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3742             use(node->child1());
3743             use(node->child2());
3744             m_indexInBlock = branchIndexInBlock;
3745             m_currentNode = branchNode;
3746             return true;
3747         }
3748         compileDoubleCompare(node, MacroAssembler::DoubleEqual);
3749         return false;
3750     }
3751     
3752     if (node->isBinaryUseKind(StringUse)) {
3753         compileStringEquality(node);
3754         return false;
3755     }
3756     
3757     if (node->isBinaryUseKind(StringIdentUse)) {
3758         compileStringIdentEquality(node);
3759         return false;
3760     }
3761
3762     if (node->isBinaryUseKind(ObjectUse)) {
3763         unsigned branchIndexInBlock = detectPeepHoleBranch();
3764         if (branchIndexInBlock != UINT_MAX) {
3765             Node* branchNode = m_block->at(branchIndexInBlock);
3766             compilePeepHoleObjectEquality(node, branchNode);
3767             use(node->child1());
3768             use(node->child2());
3769             m_indexInBlock = branchIndexInBlock;
3770             m_currentNode = branchNode;
3771             return true;
3772         }
3773         compileObjectEquality(node);
3774         return false;
3775     }
3776
3777     if (node->isBinaryUseKind(MiscUse, UntypedUse)
3778         || node->isBinaryUseKind(UntypedUse, MiscUse)) {
3779         compileMiscStrictEq(node);
3780         return false;
3781     }
3782     
3783     if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
3784         compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
3785         return false;
3786     }
3787     
3788     if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
3789         compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
3790         return false;
3791     }
3792     
3793     if (node->isBinaryUseKind(StringUse, UntypedUse)) {
3794         compileStringToUntypedEquality(node, node->child1(), node->child2());
3795         return false;
3796     }
3797     
3798     if (node->isBinaryUseKind(UntypedUse, StringUse)) {
3799         compileStringToUntypedEquality(node, node->child2(), node->child1());
3800         return false;