Rename checkMarkByte() to jumpIfIsRememberedOrInEden().
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
44
45 namespace JSC { namespace DFG {
46
47 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
48     : m_compileOkay(true)
49     , m_jit(jit)
50     , m_currentNode(0)
51     , m_lastGeneratedNode(LastNodeType)
52     , m_indexInBlock(0)
53     , m_generationInfo(m_jit.graph().frameRegisterCount())
54     , m_state(m_jit.graph())
55     , m_interpreter(m_jit.graph(), m_state)
56     , m_stream(&jit.jitCode()->variableEventStream)
57     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
58     , m_isCheckingArgumentTypes(false)
59 {
60 }
61
62 SpeculativeJIT::~SpeculativeJIT()
63 {
64 }
65
66 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
67 {
68     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
69     
70     GPRTemporary scratch(this);
71     GPRTemporary scratch2(this);
72     GPRReg scratchGPR = scratch.gpr();
73     GPRReg scratch2GPR = scratch2.gpr();
74     
75     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
76     
77     JITCompiler::JumpList slowCases;
78     
79     slowCases.append(
80         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
81     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
82     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
83     
84     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
85     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
86     
87     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
88 #if USE(JSVALUE64)
89         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
90         for (unsigned i = numElements; i < vectorLength; ++i)
91             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
92 #else
93         EncodedValueDescriptor value;
94         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
95         for (unsigned i = numElements; i < vectorLength; ++i) {
96             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
97             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
98         }
99 #endif
100     }
101     
102     // I want a slow path that also loads out the storage pointer, and that's
103     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
104     // of work for a very small piece of functionality. :-/
105     addSlowPathGenerator(adoptPtr(
106         new CallArrayAllocatorSlowPathGenerator(
107             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
108             structure, numElements)));
109 }
110
111 void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
112 {
113     Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
114
115     m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
116     m_jit.mul32(TrustedImm32(sizeof(JSValue)), scratchGPR1, scratchGPR1);
117     m_jit.add32(TrustedImm32(Arguments::offsetOfInlineRegisterArray()), scratchGPR1);
118     emitAllocateVariableSizedJSObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR1, scratchGPR2, slowPath);
119
120     m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));
121
122     m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
123     m_jit.sub32(TrustedImm32(1), scratchGPR1);
124     m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));
125
126     m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
127     if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
128         m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));
129
130     m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
131     m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));
132
133     m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
134     m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
135
136 }
137
138 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
139 {
140     if (!m_compileOkay)
141         return;
142     ASSERT(m_isCheckingArgumentTypes || m_canExit);
143     m_jit.appendExitInfo(jumpToFail);
144     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
145 }
146
147 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
148 {
149     if (!m_compileOkay)
150         return;
151     ASSERT(m_isCheckingArgumentTypes || m_canExit);
152     m_jit.appendExitInfo(jumpsToFail);
153     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
154 }
155
156 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
157 {
158     if (!m_compileOkay)
159         return OSRExitJumpPlaceholder();
160     ASSERT(m_isCheckingArgumentTypes || m_canExit);
161     unsigned index = m_jit.jitCode()->osrExit.size();
162     m_jit.appendExitInfo();
163     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
164     return OSRExitJumpPlaceholder(index);
165 }
166
167 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
168 {
169     ASSERT(m_isCheckingArgumentTypes || m_canExit);
170     return speculationCheck(kind, jsValueSource, nodeUse.node());
171 }
172
173 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
174 {
175     ASSERT(m_isCheckingArgumentTypes || m_canExit);
176     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
177 }
178
179 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
180 {
181     ASSERT(m_isCheckingArgumentTypes || m_canExit);
182     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
183 }
184
185 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
186 {
187     if (!m_compileOkay)
188         return;
189     ASSERT(m_isCheckingArgumentTypes || m_canExit);
190     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
191     m_jit.appendExitInfo(jumpToFail);
192     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
193 }
194
195 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
196 {
197     ASSERT(m_isCheckingArgumentTypes || m_canExit);
198     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
199 }
200
201 void SpeculativeJIT::emitInvalidationPoint(Node* node)
202 {
203     if (!m_compileOkay)
204         return;
205     ASSERT(m_canExit);
206     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
207     m_jit.jitCode()->appendOSRExit(OSRExit(
208         UncountableInvalidation, JSValueSource(),
209         m_jit.graph().methodOfGettingAValueProfileFor(node),
210         this, m_stream->size()));
211     info.m_replacementSource = m_jit.watchpointLabel();
212     ASSERT(info.m_replacementSource.isSet());
213     noResult(node);
214 }
215
216 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
217 {
218     ASSERT(m_isCheckingArgumentTypes || m_canExit);
219     if (!m_compileOkay)
220         return;
221     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
222     m_compileOkay = false;
223     if (verboseCompilationEnabled())
224         dataLog("Bailing compilation.\n");
225 }
226
227 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
228 {
229     ASSERT(m_isCheckingArgumentTypes || m_canExit);
230     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
231 }
232
233 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
234 {
235     ASSERT(needsTypeCheck(edge, typesPassedThrough));
236     m_interpreter.filter(edge, typesPassedThrough);
237     speculationCheck(BadType, source, edge.node(), jumpToFail);
238 }
239
240 RegisterSet SpeculativeJIT::usedRegisters()
241 {
242     RegisterSet result;
243     
244     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
245         GPRReg gpr = GPRInfo::toRegister(i);
246         if (m_gprs.isInUse(gpr))
247             result.set(gpr);
248     }
249     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
250         FPRReg fpr = FPRInfo::toRegister(i);
251         if (m_fprs.isInUse(fpr))
252             result.set(fpr);
253     }
254     
255     result.merge(RegisterSet::specialRegisters());
256     
257     return result;
258 }
259
260 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
261 {
262     m_slowPathGenerators.append(slowPathGenerator);
263 }
264
265 void SpeculativeJIT::runSlowPathGenerators()
266 {
267     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
268         m_slowPathGenerators[i]->generate(this);
269 }
270
271 // On Windows we need to wrap fmod; on other platforms we can call it directly.
272 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
273 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
274 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
275 {
276     return fmod(x, y);
277 }
278 #else
279 #define fmodAsDFGOperation fmod
280 #endif
281
282 void SpeculativeJIT::clearGenerationInfo()
283 {
284     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
285         m_generationInfo[i] = GenerationInfo();
286     m_gprs = RegisterBank<GPRInfo>();
287     m_fprs = RegisterBank<FPRInfo>();
288 }
289
290 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
291 {
292     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
293     Node* node = info.node();
294     DataFormat registerFormat = info.registerFormat();
295     ASSERT(registerFormat != DataFormatNone);
296     ASSERT(registerFormat != DataFormatDouble);
297         
298     SilentSpillAction spillAction;
299     SilentFillAction fillAction;
300         
301     if (!info.needsSpill())
302         spillAction = DoNothingForSpill;
303     else {
304 #if USE(JSVALUE64)
305         ASSERT(info.gpr() == source);
306         if (registerFormat == DataFormatInt32)
307             spillAction = Store32Payload;
308         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
309             spillAction = StorePtr;
310         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
311             spillAction = Store64;
312         else {
313             ASSERT(registerFormat & DataFormatJS);
314             spillAction = Store64;
315         }
316 #elif USE(JSVALUE32_64)
317         if (registerFormat & DataFormatJS) {
318             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
319             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
320         } else {
321             ASSERT(info.gpr() == source);
322             spillAction = Store32Payload;
323         }
324 #endif
325     }
326         
327     if (registerFormat == DataFormatInt32) {
328         ASSERT(info.gpr() == source);
329         ASSERT(isJSInt32(info.registerFormat()));
330         if (node->hasConstant()) {
331             ASSERT(node->isInt32Constant());
332             fillAction = SetInt32Constant;
333         } else
334             fillAction = Load32Payload;
335     } else if (registerFormat == DataFormatBoolean) {
336 #if USE(JSVALUE64)
337         RELEASE_ASSERT_NOT_REACHED();
338 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
339         fillAction = DoNothingForFill;
340 #endif
341 #elif USE(JSVALUE32_64)
342         ASSERT(info.gpr() == source);
343         if (node->hasConstant()) {
344             ASSERT(node->isBooleanConstant());
345             fillAction = SetBooleanConstant;
346         } else
347             fillAction = Load32Payload;
348 #endif
349     } else if (registerFormat == DataFormatCell) {
350         ASSERT(info.gpr() == source);
351         if (node->hasConstant()) {
352             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
353             node->asCell(); // To get the assertion.
354             fillAction = SetCellConstant;
355         } else {
356 #if USE(JSVALUE64)
357             fillAction = LoadPtr;
358 #else
359             fillAction = Load32Payload;
360 #endif
361         }
362     } else if (registerFormat == DataFormatStorage) {
363         ASSERT(info.gpr() == source);
364         fillAction = LoadPtr;
365     } else if (registerFormat == DataFormatInt52) {
366         if (node->hasConstant())
367             fillAction = SetInt52Constant;
368         else if (info.spillFormat() == DataFormatInt52)
369             fillAction = Load64;
370         else if (info.spillFormat() == DataFormatStrictInt52)
371             fillAction = Load64ShiftInt52Left;
372         else if (info.spillFormat() == DataFormatNone)
373             fillAction = Load64;
374         else {
375             RELEASE_ASSERT_NOT_REACHED();
376 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
377             fillAction = Load64; // Make GCC happy.
378 #endif
379         }
380     } else if (registerFormat == DataFormatStrictInt52) {
381         if (node->hasConstant())
382             fillAction = SetStrictInt52Constant;
383         else if (info.spillFormat() == DataFormatInt52)
384             fillAction = Load64ShiftInt52Right;
385         else if (info.spillFormat() == DataFormatStrictInt52)
386             fillAction = Load64;
387         else if (info.spillFormat() == DataFormatNone)
388             fillAction = Load64;
389         else {
390             RELEASE_ASSERT_NOT_REACHED();
391 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
392             fillAction = Load64; // Make GCC happy.
393 #endif
394         }
395     } else {
396         ASSERT(registerFormat & DataFormatJS);
397 #if USE(JSVALUE64)
398         ASSERT(info.gpr() == source);
399         if (node->hasConstant()) {
400             if (node->isCellConstant())
401                 fillAction = SetTrustedJSConstant;
402             else
403                 fillAction = SetJSConstant;
404         } else if (info.spillFormat() == DataFormatInt32) {
405             ASSERT(registerFormat == DataFormatJSInt32);
406             fillAction = Load32PayloadBoxInt;
407         } else
408             fillAction = Load64;
409 #else
410         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
411         if (node->hasConstant())
412             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
413         else if (info.payloadGPR() == source)
414             fillAction = Load32Payload;
415         else { // Fill the Tag
416             switch (info.spillFormat()) {
417             case DataFormatInt32:
418                 ASSERT(registerFormat == DataFormatJSInt32);
419                 fillAction = SetInt32Tag;
420                 break;
421             case DataFormatCell:
422                 ASSERT(registerFormat == DataFormatJSCell);
423                 fillAction = SetCellTag;
424                 break;
425             case DataFormatBoolean:
426                 ASSERT(registerFormat == DataFormatJSBoolean);
427                 fillAction = SetBooleanTag;
428                 break;
429             default:
430                 fillAction = Load32Tag;
431                 break;
432             }
433         }
434 #endif
435     }
436         
437     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
438 }
439     
440 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
441 {
442     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
443     Node* node = info.node();
444     ASSERT(info.registerFormat() == DataFormatDouble);
445
446     SilentSpillAction spillAction;
447     SilentFillAction fillAction;
448         
449     if (!info.needsSpill())
450         spillAction = DoNothingForSpill;
451     else {
452         ASSERT(!node->hasConstant());
453         ASSERT(info.spillFormat() == DataFormatNone);
454         ASSERT(info.fpr() == source);
455         spillAction = StoreDouble;
456     }
457         
458 #if USE(JSVALUE64)
459     if (node->hasConstant()) {
460         node->asNumber(); // To get the assertion.
461         fillAction = SetDoubleConstant;
462     } else {
463         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
464         fillAction = LoadDouble;
465     }
466 #elif USE(JSVALUE32_64)
467     ASSERT(info.registerFormat() == DataFormatDouble);
468     if (node->hasConstant()) {
469         node->asNumber(); // To get the assertion.
470         fillAction = SetDoubleConstant;
471     } else
472         fillAction = LoadDouble;
473 #endif
474
475     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
476 }
477     
478 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
479 {
480     switch (plan.spillAction()) {
481     case DoNothingForSpill:
482         break;
483     case Store32Tag:
484         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
485         break;
486     case Store32Payload:
487         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
488         break;
489     case StorePtr:
490         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
491         break;
492 #if USE(JSVALUE64)
493     case Store64:
494         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
495         break;
496 #endif
497     case StoreDouble:
498         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
499         break;
500     default:
501         RELEASE_ASSERT_NOT_REACHED();
502     }
503 }
504     
505 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
506 {
507 #if USE(JSVALUE32_64)
508     UNUSED_PARAM(canTrample);
509 #endif
510     switch (plan.fillAction()) {
511     case DoNothingForFill:
512         break;
513     case SetInt32Constant:
514         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
515         break;
516 #if USE(JSVALUE64)
517     case SetInt52Constant:
518         m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
519         break;
520     case SetStrictInt52Constant:
521         m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
522         break;
523 #endif // USE(JSVALUE64)
524     case SetBooleanConstant:
525         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
526         break;
527     case SetCellConstant:
528         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
529         break;
530 #if USE(JSVALUE64)
531     case SetTrustedJSConstant:
532         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
533         break;
534     case SetJSConstant:
535         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
536         break;
537     case SetDoubleConstant:
538         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
539         m_jit.move64ToDouble(canTrample, plan.fpr());
540         break;
541     case Load32PayloadBoxInt:
542         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
543         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
544         break;
545     case Load32PayloadConvertToInt52:
546         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
547         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
548         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
549         break;
550     case Load32PayloadSignExtend:
551         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
552         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
553         break;
554 #else
555     case SetJSConstantTag:
556         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
557         break;
558     case SetJSConstantPayload:
559         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
560         break;
561     case SetInt32Tag:
562         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
563         break;
564     case SetCellTag:
565         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
566         break;
567     case SetBooleanTag:
568         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
569         break;
570     case SetDoubleConstant:
571         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
572         break;
573 #endif
574     case Load32Tag:
575         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
576         break;
577     case Load32Payload:
578         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
579         break;
580     case LoadPtr:
581         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
582         break;
583 #if USE(JSVALUE64)
584     case Load64:
585         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
586         break;
587     case Load64ShiftInt52Right:
588         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
589         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
590         break;
591     case Load64ShiftInt52Left:
592         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
593         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
594         break;
595 #endif
596     case LoadDouble:
597         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
598         break;
599     default:
600         RELEASE_ASSERT_NOT_REACHED();
601     }
602 }
603     
604 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
605 {
606     switch (arrayMode.arrayClass()) {
607     case Array::OriginalArray: {
608         CRASH();
609 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
610         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
611         return result;
612 #endif
613     }
614         
615     case Array::Array:
616         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
617         return m_jit.branch32(
618             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
619         
620     case Array::NonArray:
621     case Array::OriginalNonArray:
622         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
623         return m_jit.branch32(
624             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
625         
626     case Array::PossiblyArray:
627         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
628         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
629     }
630     
631     RELEASE_ASSERT_NOT_REACHED();
632     return JITCompiler::Jump();
633 }
634
635 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
636 {
637     JITCompiler::JumpList result;
638     
639     switch (arrayMode.type()) {
640     case Array::Int32:
641         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
642
643     case Array::Double:
644         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
645
646     case Array::Contiguous:
647         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
648
649     case Array::ArrayStorage:
650     case Array::SlowPutArrayStorage: {
651         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
652         
653         if (arrayMode.isJSArray()) {
654             if (arrayMode.isSlowPut()) {
655                 result.append(
656                     m_jit.branchTest32(
657                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
658                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
659                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
660                 result.append(
661                     m_jit.branch32(
662                         MacroAssembler::Above, tempGPR,
663                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
664                 break;
665             }
666             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
667             result.append(
668                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
669             break;
670         }
671         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
672         if (arrayMode.isSlowPut()) {
673             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
674             result.append(
675                 m_jit.branch32(
676                     MacroAssembler::Above, tempGPR,
677                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
678             break;
679         }
680         result.append(
681             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
682         break;
683     }
684     default:
685         CRASH();
686         break;
687     }
688     
689     return result;
690 }
691
692 void SpeculativeJIT::checkArray(Node* node)
693 {
694     ASSERT(node->arrayMode().isSpecific());
695     ASSERT(!node->arrayMode().doesConversion());
696     
697     SpeculateCellOperand base(this, node->child1());
698     GPRReg baseReg = base.gpr();
699     
700     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
701         noResult(m_currentNode);
702         return;
703     }
704     
705     const ClassInfo* expectedClassInfo = 0;
706     
707     switch (node->arrayMode().type()) {
708     case Array::String:
709         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
710         break;
711     case Array::Int32:
712     case Array::Double:
713     case Array::Contiguous:
714     case Array::ArrayStorage:
715     case Array::SlowPutArrayStorage: {
716         GPRTemporary temp(this);
717         GPRReg tempGPR = temp.gpr();
718         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
719         speculationCheck(
720             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
721             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
722         
723         noResult(m_currentNode);
724         return;
725     }
726     case Array::Arguments:
727         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);
728
729         noResult(m_currentNode);
730         return;
731     default:
732         speculateCellTypeWithoutTypeFiltering(
733             node->child1(), baseReg,
734             typeForTypedArrayType(node->arrayMode().typedArrayType()));
735         noResult(m_currentNode);
736         return;
737     }
738     
739     RELEASE_ASSERT(expectedClassInfo);
740     
741     GPRTemporary temp(this);
742     GPRTemporary temp2(this);
743     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
744     speculationCheck(
745         BadType, JSValueSource::unboxedCell(baseReg), node,
746         m_jit.branchPtr(
747             MacroAssembler::NotEqual,
748             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
749             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
750     
751     noResult(m_currentNode);
752 }
753
754 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
755 {
756     ASSERT(node->arrayMode().doesConversion());
757     
758     GPRTemporary temp(this);
759     GPRTemporary structure;
760     GPRReg tempGPR = temp.gpr();
761     GPRReg structureGPR = InvalidGPRReg;
762     
763     if (node->op() != ArrayifyToStructure) {
764         GPRTemporary realStructure(this);
765         structure.adopt(realStructure);
766         structureGPR = structure.gpr();
767     }
768         
769     // We can skip all that comes next if we already have array storage.
770     MacroAssembler::JumpList slowPath;
771     
772     if (node->op() == ArrayifyToStructure) {
773         slowPath.append(m_jit.branchWeakStructure(
774             JITCompiler::NotEqual,
775             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
776             node->structure()));
777     } else {
778         m_jit.load8(
779             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
780         
781         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
782     }
783     
784     addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
785         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
786     
787     noResult(m_currentNode);
788 }
789
790 void SpeculativeJIT::arrayify(Node* node)
791 {
792     ASSERT(node->arrayMode().isSpecific());
793     
794     SpeculateCellOperand base(this, node->child1());
795     
796     if (!node->child2()) {
797         arrayify(node, base.gpr(), InvalidGPRReg);
798         return;
799     }
800     
801     SpeculateInt32Operand property(this, node->child2());
802     
803     arrayify(node, base.gpr(), property.gpr());
804 }
805
806 GPRReg SpeculativeJIT::fillStorage(Edge edge)
807 {
808     VirtualRegister virtualRegister = edge->virtualRegister();
809     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
810     
811     switch (info.registerFormat()) {
812     case DataFormatNone: {
813         if (info.spillFormat() == DataFormatStorage) {
814             GPRReg gpr = allocate();
815             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
816             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
817             info.fillStorage(*m_stream, gpr);
818             return gpr;
819         }
820         
821         // Must be a cell; fill it as a cell and then return the pointer.
822         return fillSpeculateCell(edge);
823     }
824         
825     case DataFormatStorage: {
826         GPRReg gpr = info.gpr();
827         m_gprs.lock(gpr);
828         return gpr;
829     }
830         
831     default:
832         return fillSpeculateCell(edge);
833     }
834 }
835
836 void SpeculativeJIT::useChildren(Node* node)
837 {
838     if (node->flags() & NodeHasVarArgs) {
839         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
840             if (!!m_jit.graph().m_varArgChildren[childIdx])
841                 use(m_jit.graph().m_varArgChildren[childIdx]);
842         }
843     } else {
844         Edge child1 = node->child1();
845         if (!child1) {
846             ASSERT(!node->child2() && !node->child3());
847             return;
848         }
849         use(child1);
850         
851         Edge child2 = node->child2();
852         if (!child2) {
853             ASSERT(!node->child3());
854             return;
855         }
856         use(child2);
857         
858         Edge child3 = node->child3();
859         if (!child3)
860             return;
861         use(child3);
862     }
863 }
864
865 void SpeculativeJIT::compileIn(Node* node)
866 {
867     SpeculateCellOperand base(this, node->child2());
868     GPRReg baseGPR = base.gpr();
869     
870     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
871         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
872             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
873             
874             GPRTemporary result(this);
875             GPRReg resultGPR = result.gpr();
876
877             use(node->child1());
878             
879             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
880             MacroAssembler::Label done = m_jit.label();
881             
882             OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
883                 jump.m_jump, this, operationInOptimize,
884                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
885                 string->tryGetValueImpl());
886             
887             stubInfo->codeOrigin = node->origin.semantic;
888             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
889             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
890             stubInfo->patch.usedRegisters = usedRegisters();
891             stubInfo->patch.spillMode = NeedToSpill;
892             
893             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
894             addSlowPathGenerator(slowPath.release());
895                 
896             base.use();
897             
898             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
899             return;
900         }
901     }
902         
903     JSValueOperand key(this, node->child1());
904     JSValueRegs regs = key.jsValueRegs();
905         
906     GPRFlushedCallResult result(this);
907     GPRReg resultGPR = result.gpr();
908         
909     base.use();
910     key.use();
911         
912     flushRegisters();
913     callOperation(
914         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
915         baseGPR, regs);
916     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
917 }
918
919 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
920 {
921     unsigned branchIndexInBlock = detectPeepHoleBranch();
922     if (branchIndexInBlock != UINT_MAX) {
923         Node* branchNode = m_block->at(branchIndexInBlock);
924
925         ASSERT(node->adjustedRefCount() == 1);
926         
927         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
928     
929         m_indexInBlock = branchIndexInBlock;
930         m_currentNode = branchNode;
931         
932         return true;
933     }
934     
935     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
936     
937     return false;
938 }
939
940 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
941 {
942     unsigned branchIndexInBlock = detectPeepHoleBranch();
943     if (branchIndexInBlock != UINT_MAX) {
944         Node* branchNode = m_block->at(branchIndexInBlock);
945
946         ASSERT(node->adjustedRefCount() == 1);
947         
948         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
949     
950         m_indexInBlock = branchIndexInBlock;
951         m_currentNode = branchNode;
952         
953         return true;
954     }
955     
956     nonSpeculativeNonPeepholeStrictEq(node, invert);
957     
958     return false;
959 }
960
961 static const char* dataFormatString(DataFormat format)
962 {
963     // These values correspond to the DataFormat enum.
964     const char* strings[] = {
965         "[  ]",
966         "[ i]",
967         "[ d]",
968         "[ c]",
969         "Err!",
970         "Err!",
971         "Err!",
972         "Err!",
973         "[J ]",
974         "[Ji]",
975         "[Jd]",
976         "[Jc]",
977         "Err!",
978         "Err!",
979         "Err!",
980         "Err!",
981     };
982     return strings[format];
983 }
984
985 void SpeculativeJIT::dump(const char* label)
986 {
987     if (label)
988         dataLogF("<%s>\n", label);
989
990     dataLogF("  gprs:\n");
991     m_gprs.dump();
992     dataLogF("  fprs:\n");
993     m_fprs.dump();
994     dataLogF("  VirtualRegisters:\n");
995     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
996         GenerationInfo& info = m_generationInfo[i];
997         if (info.alive())
998             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
999         else
1000             dataLogF("    % 3d:[__][__]", i);
1001         if (info.registerFormat() == DataFormatDouble)
1002             dataLogF(":fpr%d\n", info.fpr());
1003         else if (info.registerFormat() != DataFormatNone
1004 #if USE(JSVALUE32_64)
1005             && !(info.registerFormat() & DataFormatJS)
1006 #endif
1007             ) {
1008             ASSERT(info.gpr() != InvalidGPRReg);
1009             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1010         } else
1011             dataLogF("\n");
1012     }
1013     if (label)
1014         dataLogF("</%s>\n", label);
1015 }
1016
1017 GPRTemporary::GPRTemporary()
1018     : m_jit(0)
1019     , m_gpr(InvalidGPRReg)
1020 {
1021 }
1022
1023 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1024     : m_jit(jit)
1025     , m_gpr(InvalidGPRReg)
1026 {
1027     m_gpr = m_jit->allocate();
1028 }
1029
1030 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1031     : m_jit(jit)
1032     , m_gpr(InvalidGPRReg)
1033 {
1034     m_gpr = m_jit->allocate(specific);
1035 }
1036
1037 #if USE(JSVALUE32_64)
1038 GPRTemporary::GPRTemporary(
1039     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1040     : m_jit(jit)
1041     , m_gpr(InvalidGPRReg)
1042 {
1043     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1044         m_gpr = m_jit->reuse(op1.gpr(which));
1045     else
1046         m_gpr = m_jit->allocate();
1047 }
1048 #endif // USE(JSVALUE32_64)
1049
1050 JSValueRegsTemporary::JSValueRegsTemporary() { }
1051
1052 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1053 #if USE(JSVALUE64)
1054     : m_gpr(jit)
1055 #else
1056     : m_payloadGPR(jit)
1057     , m_tagGPR(jit)
1058 #endif
1059 {
1060 }
1061
1062 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1063
1064 JSValueRegs JSValueRegsTemporary::regs()
1065 {
1066 #if USE(JSVALUE64)
1067     return JSValueRegs(m_gpr.gpr());
1068 #else
1069     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1070 #endif
1071 }
1072
1073 void GPRTemporary::adopt(GPRTemporary& other)
1074 {
1075     ASSERT(!m_jit);
1076     ASSERT(m_gpr == InvalidGPRReg);
1077     ASSERT(other.m_jit);
1078     ASSERT(other.m_gpr != InvalidGPRReg);
1079     m_jit = other.m_jit;
1080     m_gpr = other.m_gpr;
1081     other.m_jit = 0;
1082     other.m_gpr = InvalidGPRReg;
1083 }
1084
1085 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1086     : m_jit(jit)
1087     , m_fpr(InvalidFPRReg)
1088 {
1089     m_fpr = m_jit->fprAllocate();
1090 }
1091
1092 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1093     : m_jit(jit)
1094     , m_fpr(InvalidFPRReg)
1095 {
1096     if (m_jit->canReuse(op1.node()))
1097         m_fpr = m_jit->reuse(op1.fpr());
1098     else
1099         m_fpr = m_jit->fprAllocate();
1100 }
1101
1102 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1103     : m_jit(jit)
1104     , m_fpr(InvalidFPRReg)
1105 {
1106     if (m_jit->canReuse(op1.node()))
1107         m_fpr = m_jit->reuse(op1.fpr());
1108     else if (m_jit->canReuse(op2.node()))
1109         m_fpr = m_jit->reuse(op2.fpr());
1110     else
1111         m_fpr = m_jit->fprAllocate();
1112 }
1113
1114 #if USE(JSVALUE32_64)
1115 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1116     : m_jit(jit)
1117     , m_fpr(InvalidFPRReg)
1118 {
1119     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1120         m_fpr = m_jit->reuse(op1.fpr());
1121     else
1122         m_fpr = m_jit->fprAllocate();
1123 }
1124 #endif
1125
1126 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1127 {
1128     BasicBlock* taken = branchNode->branchData()->taken.block;
1129     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1130     
1131     SpeculateDoubleOperand op1(this, node->child1());
1132     SpeculateDoubleOperand op2(this, node->child2());
1133     
1134     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1135     jump(notTaken);
1136 }
1137
1138 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1139 {
1140     BasicBlock* taken = branchNode->branchData()->taken.block;
1141     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1142
1143     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1144     
1145     if (taken == nextBlock()) {
1146         condition = MacroAssembler::NotEqual;
1147         BasicBlock* tmp = taken;
1148         taken = notTaken;
1149         notTaken = tmp;
1150     }
1151
1152     SpeculateCellOperand op1(this, node->child1());
1153     SpeculateCellOperand op2(this, node->child2());
1154     
1155     GPRReg op1GPR = op1.gpr();
1156     GPRReg op2GPR = op2.gpr();
1157     
1158     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1159         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1160             speculationCheck(
1161                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
1162                 m_jit.branchStructurePtr(
1163                     MacroAssembler::Equal, 
1164                     MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()), 
1165                     m_jit.vm()->stringStructure.get()));
1166         }
1167         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1168             speculationCheck(
1169                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1170                 m_jit.branchStructurePtr(
1171                     MacroAssembler::Equal, 
1172                     MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()), 
1173                     m_jit.vm()->stringStructure.get()));
1174         }
1175     } else {
1176         GPRTemporary structure(this);
1177         GPRTemporary temp(this);
1178         GPRReg structureGPR = structure.gpr();
1179
1180         m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
1181         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1182             speculationCheck(
1183                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1184                 m_jit.branchPtr(
1185                     MacroAssembler::Equal, 
1186                     structureGPR, 
1187                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1188         }
1189         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1190             m_jit.branchTest8(
1191                 MacroAssembler::NonZero, 
1192                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1193                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1194
1195         m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
1196         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1197             speculationCheck(
1198                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1199                 m_jit.branchPtr(
1200                     MacroAssembler::Equal, 
1201                     structureGPR, 
1202                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1203         }
1204         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1205             m_jit.branchTest8(
1206                 MacroAssembler::NonZero, 
1207                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1208                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1209     }
1210
1211     branchPtr(condition, op1GPR, op2GPR, taken);
1212     jump(notTaken);
1213 }
1214
1215 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1216 {
1217     BasicBlock* taken = branchNode->branchData()->taken.block;
1218     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1219
1220     // The branch instruction will branch to the taken block.
1221     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1222     if (taken == nextBlock()) {
1223         condition = JITCompiler::invert(condition);
1224         BasicBlock* tmp = taken;
1225         taken = notTaken;
1226         notTaken = tmp;
1227     }
1228
1229     if (node->child1()->isBooleanConstant()) {
1230         bool imm = node->child1()->asBoolean();
1231         SpeculateBooleanOperand op2(this, node->child2());
1232         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1233     } else if (node->child2()->isBooleanConstant()) {
1234         SpeculateBooleanOperand op1(this, node->child1());
1235         bool imm = node->child2()->asBoolean();
1236         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1237     } else {
1238         SpeculateBooleanOperand op1(this, node->child1());
1239         SpeculateBooleanOperand op2(this, node->child2());
1240         branch32(condition, op1.gpr(), op2.gpr(), taken);
1241     }
1242
1243     jump(notTaken);
1244 }
1245
1246 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1247 {
1248     BasicBlock* taken = branchNode->branchData()->taken.block;
1249     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1250
1251     // The branch instruction will branch to the taken block.
1252     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1253     if (taken == nextBlock()) {
1254         condition = JITCompiler::invert(condition);
1255         BasicBlock* tmp = taken;
1256         taken = notTaken;
1257         notTaken = tmp;
1258     }
1259
1260     if (node->child1()->isInt32Constant()) {
1261         int32_t imm = node->child1()->asInt32();
1262         SpeculateInt32Operand op2(this, node->child2());
1263         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1264     } else if (node->child2()->isInt32Constant()) {
1265         SpeculateInt32Operand op1(this, node->child1());
1266         int32_t imm = node->child2()->asInt32();
1267         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1268     } else {
1269         SpeculateInt32Operand op1(this, node->child1());
1270         SpeculateInt32Operand op2(this, node->child2());
1271         branch32(condition, op1.gpr(), op2.gpr(), taken);
1272     }
1273
1274     jump(notTaken);
1275 }
1276
1277 // Returns true if the compare is fused with a subsequent branch.
1278 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1279 {
1280     // Fused compare & branch.
1281     unsigned branchIndexInBlock = detectPeepHoleBranch();
1282     if (branchIndexInBlock != UINT_MAX) {
1283         Node* branchNode = m_block->at(branchIndexInBlock);
1284
1285         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1286         // so can be no intervening nodes to also reference the compare. 
1287         ASSERT(node->adjustedRefCount() == 1);
1288
1289         if (node->isBinaryUseKind(Int32Use))
1290             compilePeepHoleInt32Branch(node, branchNode, condition);
1291 #if USE(JSVALUE64)
1292         else if (node->isBinaryUseKind(Int52RepUse))
1293             compilePeepHoleInt52Branch(node, branchNode, condition);
1294 #endif // USE(JSVALUE64)
1295         else if (node->isBinaryUseKind(DoubleRepUse))
1296             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1297         else if (node->op() == CompareEq) {
1298             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1299                 // Use non-peephole comparison, for now.
1300                 return false;
1301             }
1302             if (node->isBinaryUseKind(BooleanUse))
1303                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1304             else if (node->isBinaryUseKind(ObjectUse))
1305                 compilePeepHoleObjectEquality(node, branchNode);
1306             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1307                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1308             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1309                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1310             else {
1311                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1312                 return true;
1313             }
1314         } else {
1315             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1316             return true;
1317         }
1318
1319         use(node->child1());
1320         use(node->child2());
1321         m_indexInBlock = branchIndexInBlock;
1322         m_currentNode = branchNode;
1323         return true;
1324     }
1325     return false;
1326 }
1327
1328 void SpeculativeJIT::noticeOSRBirth(Node* node)
1329 {
1330     if (!node->hasVirtualRegister())
1331         return;
1332     
1333     VirtualRegister virtualRegister = node->virtualRegister();
1334     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1335     
1336     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1337 }
1338
1339 void SpeculativeJIT::compileMovHint(Node* node)
1340 {
1341     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1342     
1343     Node* child = node->child1().node();
1344     noticeOSRBirth(child);
1345     
1346     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1347 }
1348
1349 void SpeculativeJIT::bail(AbortReason reason)
1350 {
1351     if (verboseCompilationEnabled())
1352         dataLog("Bailing compilation.\n");
1353     m_compileOkay = true;
1354     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1355     clearGenerationInfo();
1356 }
1357
1358 void SpeculativeJIT::compileCurrentBlock()
1359 {
1360     ASSERT(m_compileOkay);
1361     
1362     if (!m_block)
1363         return;
1364     
1365     ASSERT(m_block->isReachable);
1366     
1367     m_jit.blockHeads()[m_block->index] = m_jit.label();
1368
1369     if (!m_block->intersectionOfCFAHasVisited) {
1370         // Don't generate code for basic blocks that are unreachable according to CFA.
1371         // But to be sure that nobody has generated a jump to this block, drop in a
1372         // breakpoint here.
1373         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1374         return;
1375     }
1376
1377     m_stream->appendAndLog(VariableEvent::reset());
1378     
1379     m_jit.jitAssertHasValidCallFrame();
1380     m_jit.jitAssertTagsInPlace();
1381     m_jit.jitAssertArgumentCountSane();
1382
1383     m_state.reset();
1384     m_state.beginBasicBlock(m_block);
1385     
1386     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1387         int operand = m_block->variablesAtHead.operandForIndex(i);
1388         Node* node = m_block->variablesAtHead[i];
1389         if (!node)
1390             continue; // No need to record dead SetLocal's.
1391         
1392         VariableAccessData* variable = node->variableAccessData();
1393         DataFormat format;
1394         if (!node->refCount())
1395             continue; // No need to record dead SetLocal's.
1396         format = dataFormatFor(variable->flushFormat());
1397         m_stream->appendAndLog(
1398             VariableEvent::setLocal(
1399                 VirtualRegister(operand),
1400                 variable->machineLocal(),
1401                 format));
1402     }
1403     
1404     m_codeOriginForExitTarget = CodeOrigin();
1405     m_codeOriginForExitProfile = CodeOrigin();
1406     
1407     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1408         m_currentNode = m_block->at(m_indexInBlock);
1409         
1410         // We may have hit a contradiction that the CFA was aware of but that the JIT
1411         // didn't cause directly.
1412         if (!m_state.isValid()) {
1413             bail(DFGBailedAtTopOfBlock);
1414             return;
1415         }
1416
1417         if (ASSERT_DISABLED)
1418             m_canExit = true; // Essentially disable the assertions.
1419         else
1420             m_canExit = mayExit(m_jit.graph(), m_currentNode);
1421         
1422         m_interpreter.startExecuting();
1423         m_jit.setForNode(m_currentNode);
1424         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1425         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1426         m_lastGeneratedNode = m_currentNode->op();
1427         if (!m_currentNode->shouldGenerate()) {
1428             switch (m_currentNode->op()) {
1429             case JSConstant:
1430                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1431                 break;
1432                 
1433             case SetLocal:
1434                 RELEASE_ASSERT_NOT_REACHED();
1435                 break;
1436                 
1437             case MovHint:
1438                 compileMovHint(m_currentNode);
1439                 break;
1440                 
1441             case ZombieHint: {
1442                 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1443                 break;
1444             }
1445
1446             default:
1447                 if (belongsInMinifiedGraph(m_currentNode->op()))
1448                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1449                 break;
1450             }
1451         } else {
1452             
1453             if (verboseCompilationEnabled()) {
1454                 dataLogF(
1455                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1456                     (int)m_currentNode->index(),
1457                     m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1458                 dataLog("\n");
1459             }
1460             
1461             compile(m_currentNode);
1462
1463 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1464             m_jit.clearRegisterAllocationOffsets();
1465 #endif
1466
1467             if (!m_compileOkay) {
1468                 bail(DFGBailedAtEndOfNode);
1469                 return;
1470             }
1471             
1472             if (belongsInMinifiedGraph(m_currentNode->op())) {
1473                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1474                 noticeOSRBirth(m_currentNode);
1475             }
1476         }
1477         
1478         // Make sure that the abstract state is rematerialized for the next node.
1479         m_interpreter.executeEffects(m_indexInBlock);
1480     }
1481     
1482     // Perform the most basic verification that children have been used correctly.
1483     if (!ASSERT_DISABLED) {
1484         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1485             GenerationInfo& info = m_generationInfo[index];
1486             RELEASE_ASSERT(!info.alive());
1487         }
1488     }
1489 }
1490
1491 // If we are making type predictions about our arguments then
1492 // we need to check that they are correct on function entry.
1493 void SpeculativeJIT::checkArgumentTypes()
1494 {
1495     ASSERT(!m_currentNode);
1496     m_isCheckingArgumentTypes = true;
1497     m_codeOriginForExitTarget = CodeOrigin(0);
1498     m_codeOriginForExitProfile = CodeOrigin(0);
1499
1500     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1501         Node* node = m_jit.graph().m_arguments[i];
1502         if (!node) {
1503             // The argument is dead. We don't do any checks for such arguments.
1504             continue;
1505         }
1506         
1507         ASSERT(node->op() == SetArgument);
1508         ASSERT(node->shouldGenerate());
1509
1510         VariableAccessData* variableAccessData = node->variableAccessData();
1511         FlushFormat format = variableAccessData->flushFormat();
1512         
1513         if (format == FlushedJSValue)
1514             continue;
1515         
1516         VirtualRegister virtualRegister = variableAccessData->local();
1517
1518         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1519         
1520 #if USE(JSVALUE64)
1521         switch (format) {
1522         case FlushedInt32: {
1523             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1524             break;
1525         }
1526         case FlushedBoolean: {
1527             GPRTemporary temp(this);
1528             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1529             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1530             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1531             break;
1532         }
1533         case FlushedCell: {
1534             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1535             break;
1536         }
1537         default:
1538             RELEASE_ASSERT_NOT_REACHED();
1539             break;
1540         }
1541 #else
1542         switch (format) {
1543         case FlushedInt32: {
1544             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1545             break;
1546         }
1547         case FlushedBoolean: {
1548             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1549             break;
1550         }
1551         case FlushedCell: {
1552             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1553             break;
1554         }
1555         default:
1556             RELEASE_ASSERT_NOT_REACHED();
1557             break;
1558         }
1559 #endif
1560     }
1561     m_isCheckingArgumentTypes = false;
1562 }
1563
1564 bool SpeculativeJIT::compile()
1565 {
1566     checkArgumentTypes();
1567     
1568     ASSERT(!m_currentNode);
1569     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1570         m_jit.setForBlockIndex(blockIndex);
1571         m_block = m_jit.graph().block(blockIndex);
1572         compileCurrentBlock();
1573     }
1574     linkBranches();
1575     return true;
1576 }
1577
1578 void SpeculativeJIT::createOSREntries()
1579 {
1580     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1581         BasicBlock* block = m_jit.graph().block(blockIndex);
1582         if (!block)
1583             continue;
1584         if (!block->isOSRTarget)
1585             continue;
1586         
1587         // Currently we don't have OSR entry trampolines. We could add them
1588         // here if need be.
1589         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1590     }
1591 }
1592
1593 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1594 {
1595     unsigned osrEntryIndex = 0;
1596     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1597         BasicBlock* block = m_jit.graph().block(blockIndex);
1598         if (!block)
1599             continue;
1600         if (!block->isOSRTarget)
1601             continue;
1602         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1603     }
1604     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1605 }
1606
1607 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1608 {
1609     Edge child3 = m_jit.graph().varArgChild(node, 2);
1610     Edge child4 = m_jit.graph().varArgChild(node, 3);
1611
1612     ArrayMode arrayMode = node->arrayMode();
1613     
1614     GPRReg baseReg = base.gpr();
1615     GPRReg propertyReg = property.gpr();
1616     
1617     SpeculateDoubleOperand value(this, child3);
1618
1619     FPRReg valueReg = value.fpr();
1620     
1621     DFG_TYPE_CHECK(
1622         JSValueRegs(), child3, SpecFullRealNumber,
1623         m_jit.branchDouble(
1624             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1625     
1626     if (!m_compileOkay)
1627         return;
1628     
1629     StorageOperand storage(this, child4);
1630     GPRReg storageReg = storage.gpr();
1631
1632     if (node->op() == PutByValAlias) {
1633         // Store the value to the array.
1634         GPRReg propertyReg = property.gpr();
1635         FPRReg valueReg = value.fpr();
1636         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1637         
1638         noResult(m_currentNode);
1639         return;
1640     }
1641     
1642     GPRTemporary temporary;
1643     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1644
1645     MacroAssembler::Jump slowCase;
1646     
1647     if (arrayMode.isInBounds()) {
1648         speculationCheck(
1649             OutOfBounds, JSValueRegs(), 0,
1650             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1651     } else {
1652         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1653         
1654         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1655         
1656         if (!arrayMode.isOutOfBounds())
1657             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1658         
1659         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1660         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1661         
1662         inBounds.link(&m_jit);
1663     }
1664     
1665     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1666
1667     base.use();
1668     property.use();
1669     value.use();
1670     storage.use();
1671     
1672     if (arrayMode.isOutOfBounds()) {
1673         addSlowPathGenerator(
1674             slowPathCall(
1675                 slowCase, this,
1676                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1677                 NoResult, baseReg, propertyReg, valueReg));
1678     }
1679
1680     noResult(m_currentNode, UseChildrenCalledExplicitly);
1681 }
1682
1683 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1684 {
1685     SpeculateCellOperand string(this, node->child1());
1686     SpeculateStrictInt32Operand index(this, node->child2());
1687     StorageOperand storage(this, node->child3());
1688
1689     GPRReg stringReg = string.gpr();
1690     GPRReg indexReg = index.gpr();
1691     GPRReg storageReg = storage.gpr();
1692     
1693     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1694
1695     // unsigned comparison so we can filter out negative indices and indices that are too large
1696     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1697
1698     GPRTemporary scratch(this);
1699     GPRReg scratchReg = scratch.gpr();
1700
1701     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1702
1703     // Load the character into scratchReg
1704     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1705
1706     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1707     JITCompiler::Jump cont8Bit = m_jit.jump();
1708
1709     is16Bit.link(&m_jit);
1710
1711     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1712
1713     cont8Bit.link(&m_jit);
1714
1715     int32Result(scratchReg, m_currentNode);
1716 }
1717
1718 void SpeculativeJIT::compileGetByValOnString(Node* node)
1719 {
1720     SpeculateCellOperand base(this, node->child1());
1721     SpeculateStrictInt32Operand property(this, node->child2());
1722     StorageOperand storage(this, node->child3());
1723     GPRReg baseReg = base.gpr();
1724     GPRReg propertyReg = property.gpr();
1725     GPRReg storageReg = storage.gpr();
1726
1727     GPRTemporary scratch(this);
1728     GPRReg scratchReg = scratch.gpr();
1729 #if USE(JSVALUE32_64)
1730     GPRTemporary resultTag;
1731     GPRReg resultTagReg = InvalidGPRReg;
1732     if (node->arrayMode().isOutOfBounds()) {
1733         GPRTemporary realResultTag(this);
1734         resultTag.adopt(realResultTag);
1735         resultTagReg = resultTag.gpr();
1736     }
1737 #endif
1738
1739     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1740
1741     // unsigned comparison so we can filter out negative indices and indices that are too large
1742     JITCompiler::Jump outOfBounds = m_jit.branch32(
1743         MacroAssembler::AboveOrEqual, propertyReg,
1744         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1745     if (node->arrayMode().isInBounds())
1746         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1747
1748     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1749
1750     // Load the character into scratchReg
1751     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1752
1753     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1754     JITCompiler::Jump cont8Bit = m_jit.jump();
1755
1756     is16Bit.link(&m_jit);
1757
1758     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1759
1760     JITCompiler::Jump bigCharacter =
1761         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1762
1763     // 8 bit string values don't need the isASCII check.
1764     cont8Bit.link(&m_jit);
1765
1766     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1767     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1768     m_jit.loadPtr(scratchReg, scratchReg);
1769
1770     addSlowPathGenerator(
1771         slowPathCall(
1772             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1773
1774     if (node->arrayMode().isOutOfBounds()) {
1775 #if USE(JSVALUE32_64)
1776         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1777 #endif
1778
1779         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1780         if (globalObject->stringPrototypeChainIsSane()) {
1781 #if USE(JSVALUE64)
1782             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1783                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
1784 #else
1785             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1786                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1787                 baseReg, propertyReg)));
1788 #endif
1789         } else {
1790 #if USE(JSVALUE64)
1791             addSlowPathGenerator(
1792                 slowPathCall(
1793                     outOfBounds, this, operationGetByValStringInt,
1794                     scratchReg, baseReg, propertyReg));
1795 #else
1796             addSlowPathGenerator(
1797                 slowPathCall(
1798                     outOfBounds, this, operationGetByValStringInt,
1799                     resultTagReg, scratchReg, baseReg, propertyReg));
1800 #endif
1801         }
1802         
1803 #if USE(JSVALUE64)
1804         jsValueResult(scratchReg, m_currentNode);
1805 #else
1806         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1807 #endif
1808     } else
1809         cellResult(scratchReg, m_currentNode);
1810 }
1811
1812 void SpeculativeJIT::compileFromCharCode(Node* node)
1813 {
1814     SpeculateStrictInt32Operand property(this, node->child1());
1815     GPRReg propertyReg = property.gpr();
1816     GPRTemporary smallStrings(this);
1817     GPRTemporary scratch(this);
1818     GPRReg scratchReg = scratch.gpr();
1819     GPRReg smallStringsReg = smallStrings.gpr();
1820
1821     JITCompiler::JumpList slowCases;
1822     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1823     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1824     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1825
1826     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1827     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1828     cellResult(scratchReg, m_currentNode);
1829 }
1830
1831 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1832 {
1833     VirtualRegister virtualRegister = node->virtualRegister();
1834     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1835
1836     switch (info.registerFormat()) {
1837     case DataFormatStorage:
1838         RELEASE_ASSERT_NOT_REACHED();
1839
1840     case DataFormatBoolean:
1841     case DataFormatCell:
1842         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1843         return GeneratedOperandTypeUnknown;
1844
1845     case DataFormatNone:
1846     case DataFormatJSCell:
1847     case DataFormatJS:
1848     case DataFormatJSBoolean:
1849     case DataFormatJSDouble:
1850         return GeneratedOperandJSValue;
1851
1852     case DataFormatJSInt32:
1853     case DataFormatInt32:
1854         return GeneratedOperandInteger;
1855
1856     default:
1857         RELEASE_ASSERT_NOT_REACHED();
1858         return GeneratedOperandTypeUnknown;
1859     }
1860 }
1861
1862 void SpeculativeJIT::compileValueToInt32(Node* node)
1863 {
1864     switch (node->child1().useKind()) {
1865 #if USE(JSVALUE64)
1866     case Int52RepUse: {
1867         SpeculateStrictInt52Operand op1(this, node->child1());
1868         GPRTemporary result(this, Reuse, op1);
1869         GPRReg op1GPR = op1.gpr();
1870         GPRReg resultGPR = result.gpr();
1871         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1872         int32Result(resultGPR, node, DataFormatInt32);
1873         return;
1874     }
1875 #endif // USE(JSVALUE64)
1876         
1877     case DoubleRepUse: {
1878         GPRTemporary result(this);
1879         SpeculateDoubleOperand op1(this, node->child1());
1880         FPRReg fpr = op1.fpr();
1881         GPRReg gpr = result.gpr();
1882         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1883         
1884         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1885         
1886         int32Result(gpr, node);
1887         return;
1888     }
1889     
1890     case NumberUse:
1891     case NotCellUse: {
1892         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1893         case GeneratedOperandInteger: {
1894             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1895             GPRTemporary result(this, Reuse, op1);
1896             m_jit.move(op1.gpr(), result.gpr());
1897             int32Result(result.gpr(), node, op1.format());
1898             return;
1899         }
1900         case GeneratedOperandJSValue: {
1901             GPRTemporary result(this);
1902 #if USE(JSVALUE64)
1903             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1904
1905             GPRReg gpr = op1.gpr();
1906             GPRReg resultGpr = result.gpr();
1907             FPRTemporary tempFpr(this);
1908             FPRReg fpr = tempFpr.fpr();
1909
1910             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1911             JITCompiler::JumpList converted;
1912
1913             if (node->child1().useKind() == NumberUse) {
1914                 DFG_TYPE_CHECK(
1915                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
1916                     m_jit.branchTest64(
1917                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1918             } else {
1919                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1920                 
1921                 DFG_TYPE_CHECK(
1922                     JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
1923                 
1924                 // It's not a cell: so true turns into 1 and all else turns into 0.
1925                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1926                 converted.append(m_jit.jump());
1927                 
1928                 isNumber.link(&m_jit);
1929             }
1930
1931             // First, if we get here we have a double encoded as a JSValue
1932             m_jit.move(gpr, resultGpr);
1933             unboxDouble(resultGpr, fpr);
1934
1935             silentSpillAllRegisters(resultGpr);
1936             callOperation(toInt32, resultGpr, fpr);
1937             silentFillAllRegisters(resultGpr);
1938
1939             converted.append(m_jit.jump());
1940
1941             isInteger.link(&m_jit);
1942             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1943
1944             converted.link(&m_jit);
1945 #else
1946             Node* childNode = node->child1().node();
1947             VirtualRegister virtualRegister = childNode->virtualRegister();
1948             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1949
1950             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1951
1952             GPRReg payloadGPR = op1.payloadGPR();
1953             GPRReg resultGpr = result.gpr();
1954         
1955             JITCompiler::JumpList converted;
1956
1957             if (info.registerFormat() == DataFormatJSInt32)
1958                 m_jit.move(payloadGPR, resultGpr);
1959             else {
1960                 GPRReg tagGPR = op1.tagGPR();
1961                 FPRTemporary tempFpr(this);
1962                 FPRReg fpr = tempFpr.fpr();
1963                 FPRTemporary scratch(this);
1964
1965                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1966
1967                 if (node->child1().useKind() == NumberUse) {
1968                     DFG_TYPE_CHECK(
1969                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
1970                         m_jit.branch32(
1971                             MacroAssembler::AboveOrEqual, tagGPR,
1972                             TrustedImm32(JSValue::LowestTag)));
1973                 } else {
1974                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1975                     
1976                     DFG_TYPE_CHECK(
1977                         op1.jsValueRegs(), node->child1(), ~SpecCell,
1978                         branchIsCell(op1.jsValueRegs()));
1979                     
1980                     // It's not a cell: so true turns into 1 and all else turns into 0.
1981                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1982                     m_jit.move(TrustedImm32(0), resultGpr);
1983                     converted.append(m_jit.jump());
1984                     
1985                     isBoolean.link(&m_jit);
1986                     m_jit.move(payloadGPR, resultGpr);
1987                     converted.append(m_jit.jump());
1988                     
1989                     isNumber.link(&m_jit);
1990                 }
1991
1992                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
1993
1994                 silentSpillAllRegisters(resultGpr);
1995                 callOperation(toInt32, resultGpr, fpr);
1996                 silentFillAllRegisters(resultGpr);
1997
1998                 converted.append(m_jit.jump());
1999
2000                 isInteger.link(&m_jit);
2001                 m_jit.move(payloadGPR, resultGpr);
2002
2003                 converted.link(&m_jit);
2004             }
2005 #endif
2006             int32Result(resultGpr, node);
2007             return;
2008         }
2009         case GeneratedOperandTypeUnknown:
2010             RELEASE_ASSERT(!m_compileOkay);
2011             return;
2012         }
2013         RELEASE_ASSERT_NOT_REACHED();
2014         return;
2015     }
2016     
2017     default:
2018         ASSERT(!m_compileOkay);
2019         return;
2020     }
2021 }
2022
2023 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2024 {
2025     if (doesOverflow(node->arithMode())) {
2026         // We know that this sometimes produces doubles. So produce a double every
2027         // time. This at least allows subsequent code to not have weird conditionals.
2028             
2029         SpeculateInt32Operand op1(this, node->child1());
2030         FPRTemporary result(this);
2031             
2032         GPRReg inputGPR = op1.gpr();
2033         FPRReg outputFPR = result.fpr();
2034             
2035         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2036             
2037         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2038         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2039         positive.link(&m_jit);
2040             
2041         doubleResult(outputFPR, node);
2042         return;
2043     }
2044     
2045     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2046
2047     SpeculateInt32Operand op1(this, node->child1());
2048     GPRTemporary result(this);
2049
2050     m_jit.move(op1.gpr(), result.gpr());
2051
2052     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2053
2054     int32Result(result.gpr(), node, op1.format());
2055 }
2056
2057 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2058 {
2059     SpeculateDoubleOperand op1(this, node->child1());
2060     FPRTemporary scratch(this);
2061     GPRTemporary result(this);
2062     
2063     FPRReg valueFPR = op1.fpr();
2064     FPRReg scratchFPR = scratch.fpr();
2065     GPRReg resultGPR = result.gpr();
2066
2067     JITCompiler::JumpList failureCases;
2068     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2069     m_jit.branchConvertDoubleToInt32(
2070         valueFPR, resultGPR, failureCases, scratchFPR,
2071         shouldCheckNegativeZero(node->arithMode()));
2072     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2073
2074     int32Result(resultGPR, node);
2075 }
2076
2077 void SpeculativeJIT::compileDoubleRep(Node* node)
2078 {
2079     switch (node->child1().useKind()) {
2080     case NumberUse: {
2081         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2082     
2083         if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2084             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2085             FPRTemporary result(this);
2086             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2087             doubleResult(result.fpr(), node);
2088             return;
2089         }
2090     
2091         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2092         FPRTemporary result(this);
2093     
2094 #if USE(JSVALUE64)
2095         GPRTemporary temp(this);
2096
2097         GPRReg op1GPR = op1.gpr();
2098         GPRReg tempGPR = temp.gpr();
2099         FPRReg resultFPR = result.fpr();
2100     
2101         JITCompiler::Jump isInteger = m_jit.branch64(
2102             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2103     
2104         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2105             typeCheck(
2106                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2107                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2108         }
2109     
2110         m_jit.move(op1GPR, tempGPR);
2111         unboxDouble(tempGPR, resultFPR);
2112         JITCompiler::Jump done = m_jit.jump();
2113     
2114         isInteger.link(&m_jit);
2115         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2116         done.link(&m_jit);
2117 #else // USE(JSVALUE64) -> this is the 32_64 case
2118         FPRTemporary temp(this);
2119     
2120         GPRReg op1TagGPR = op1.tagGPR();
2121         GPRReg op1PayloadGPR = op1.payloadGPR();
2122         FPRReg tempFPR = temp.fpr();
2123         FPRReg resultFPR = result.fpr();
2124     
2125         JITCompiler::Jump isInteger = m_jit.branch32(
2126             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2127     
2128         if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2129             typeCheck(
2130                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2131                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2132         }
2133     
2134         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2135         JITCompiler::Jump done = m_jit.jump();
2136     
2137         isInteger.link(&m_jit);
2138         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2139         done.link(&m_jit);
2140 #endif // USE(JSVALUE64)
2141     
2142         doubleResult(resultFPR, node);
2143         return;
2144     }
2145         
2146 #if USE(JSVALUE64)
2147     case Int52RepUse: {
2148         SpeculateStrictInt52Operand value(this, node->child1());
2149         FPRTemporary result(this);
2150         
2151         GPRReg valueGPR = value.gpr();
2152         FPRReg resultFPR = result.fpr();
2153
2154         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2155         
2156         doubleResult(resultFPR, node);
2157         return;
2158     }
2159 #endif // USE(JSVALUE64)
2160         
2161     default:
2162         RELEASE_ASSERT_NOT_REACHED();
2163         return;
2164     }
2165 }
2166
2167 void SpeculativeJIT::compileValueRep(Node* node)
2168 {
2169     switch (node->child1().useKind()) {
2170     case DoubleRepUse: {
2171         SpeculateDoubleOperand value(this, node->child1());
2172         JSValueRegsTemporary result(this);
2173         
2174         FPRReg valueFPR = value.fpr();
2175         JSValueRegs resultRegs = result.regs();
2176         
2177         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2178         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2179         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2180         // local was purified.
2181         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2182             m_jit.purifyNaN(valueFPR);
2183
2184         boxDouble(valueFPR, resultRegs);
2185         
2186         jsValueResult(resultRegs, node);
2187         return;
2188     }
2189         
2190 #if USE(JSVALUE64)
2191     case Int52RepUse: {
2192         SpeculateStrictInt52Operand value(this, node->child1());
2193         GPRTemporary result(this);
2194         
2195         GPRReg valueGPR = value.gpr();
2196         GPRReg resultGPR = result.gpr();
2197         
2198         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2199         
2200         jsValueResult(resultGPR, node);
2201         return;
2202     }
2203 #endif // USE(JSVALUE64)
2204         
2205     default:
2206         RELEASE_ASSERT_NOT_REACHED();
2207         return;
2208     }
2209 }
2210
2211 static double clampDoubleToByte(double d)
2212 {
2213     d += 0.5;
2214     if (!(d > 0))
2215         d = 0;
2216     else if (d > 255)
2217         d = 255;
2218     return d;
2219 }
2220
2221 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2222 {
2223     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2224     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2225     jit.xorPtr(result, result);
2226     MacroAssembler::Jump clamped = jit.jump();
2227     tooBig.link(&jit);
2228     jit.move(JITCompiler::TrustedImm32(255), result);
2229     clamped.link(&jit);
2230     inBounds.link(&jit);
2231 }
2232
2233 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2234 {
2235     // Unordered compare so we pick up NaN
2236     static const double zero = 0;
2237     static const double byteMax = 255;
2238     static const double half = 0.5;
2239     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2240     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2241     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2242     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2243     
2244     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2245     // FIXME: This should probably just use a floating point round!
2246     // https://bugs.webkit.org/show_bug.cgi?id=72054
2247     jit.addDouble(source, scratch);
2248     jit.truncateDoubleToInt32(scratch, result);   
2249     MacroAssembler::Jump truncatedInt = jit.jump();
2250     
2251     tooSmall.link(&jit);
2252     jit.xorPtr(result, result);
2253     MacroAssembler::Jump zeroed = jit.jump();
2254     
2255     tooBig.link(&jit);
2256     jit.move(JITCompiler::TrustedImm32(255), result);
2257     
2258     truncatedInt.link(&jit);
2259     zeroed.link(&jit);
2260
2261 }
2262
2263 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2264 {
2265     if (node->op() == PutByValAlias)
2266         return JITCompiler::Jump();
2267     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2268         uint32_t length = view->length();
2269         Node* indexNode = m_jit.graph().child(node, 1).node();
2270         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2271             return JITCompiler::Jump();
2272         return m_jit.branch32(
2273             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2274     }
2275     return m_jit.branch32(
2276         MacroAssembler::AboveOrEqual, indexGPR,
2277         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2278 }
2279
2280 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2281 {
2282     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2283     if (!jump.isSet())
2284         return;
2285     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2286 }
2287
2288 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2289 {
2290     ASSERT(isInt(type));
2291     
2292     SpeculateCellOperand base(this, node->child1());
2293     SpeculateStrictInt32Operand property(this, node->child2());
2294     StorageOperand storage(this, node->child3());
2295
2296     GPRReg baseReg = base.gpr();
2297     GPRReg propertyReg = property.gpr();
2298     GPRReg storageReg = storage.gpr();
2299
2300     GPRTemporary result(this);
2301     GPRReg resultReg = result.gpr();
2302
2303     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2304
2305     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2306     switch (elementSize(type)) {
2307     case 1:
2308         if (isSigned(type))
2309             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2310         else
2311             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2312         break;
2313     case 2:
2314         if (isSigned(type))
2315             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2316         else
2317             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2318         break;
2319     case 4:
2320         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2321         break;
2322     default:
2323         CRASH();
2324     }
2325     if (elementSize(type) < 4 || isSigned(type)) {
2326         int32Result(resultReg, node);
2327         return;
2328     }
2329     
2330     ASSERT(elementSize(type) == 4 && !isSigned(type));
2331     if (node->shouldSpeculateInt32()) {
2332         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2333         int32Result(resultReg, node);
2334         return;
2335     }
2336     
2337 #if USE(JSVALUE64)
2338     if (node->shouldSpeculateMachineInt()) {
2339         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2340         strictInt52Result(resultReg, node);
2341         return;
2342     }
2343 #endif
2344     
2345     FPRTemporary fresult(this);
2346     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2347     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2348     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2349     positive.link(&m_jit);
2350     doubleResult(fresult.fpr(), node);
2351 }
2352
2353 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2354 {
2355     ASSERT(isInt(type));
2356     
2357     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2358     GPRReg storageReg = storage.gpr();
2359     
2360     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2361     
2362     GPRTemporary value;
2363     GPRReg valueGPR = InvalidGPRReg;
2364     
2365     if (valueUse->isConstant()) {
2366         JSValue jsValue = valueUse->asJSValue();
2367         if (!jsValue.isNumber()) {
2368             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2369             noResult(node);
2370             return;
2371         }
2372         double d = jsValue.asNumber();
2373         if (isClamped(type)) {
2374             ASSERT(elementSize(type) == 1);
2375             d = clampDoubleToByte(d);
2376         }
2377         GPRTemporary scratch(this);
2378         GPRReg scratchReg = scratch.gpr();
2379         m_jit.move(Imm32(toInt32(d)), scratchReg);
2380         value.adopt(scratch);
2381         valueGPR = scratchReg;
2382     } else {
2383         switch (valueUse.useKind()) {
2384         case Int32Use: {
2385             SpeculateInt32Operand valueOp(this, valueUse);
2386             GPRTemporary scratch(this);
2387             GPRReg scratchReg = scratch.gpr();
2388             m_jit.move(valueOp.gpr(), scratchReg);
2389             if (isClamped(type)) {
2390                 ASSERT(elementSize(type) == 1);
2391                 compileClampIntegerToByte(m_jit, scratchReg);
2392             }
2393             value.adopt(scratch);
2394             valueGPR = scratchReg;
2395             break;
2396         }
2397             
2398 #if USE(JSVALUE64)
2399         case Int52RepUse: {
2400             SpeculateStrictInt52Operand valueOp(this, valueUse);
2401             GPRTemporary scratch(this);
2402             GPRReg scratchReg = scratch.gpr();
2403             m_jit.move(valueOp.gpr(), scratchReg);
2404             if (isClamped(type)) {
2405                 ASSERT(elementSize(type) == 1);
2406                 MacroAssembler::Jump inBounds = m_jit.branch64(
2407                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2408                 MacroAssembler::Jump tooBig = m_jit.branch64(
2409                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2410                 m_jit.move(TrustedImm32(0), scratchReg);
2411                 MacroAssembler::Jump clamped = m_jit.jump();
2412                 tooBig.link(&m_jit);
2413                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2414                 clamped.link(&m_jit);
2415                 inBounds.link(&m_jit);
2416             }
2417             value.adopt(scratch);
2418             valueGPR = scratchReg;
2419             break;
2420         }
2421 #endif // USE(JSVALUE64)
2422             
2423         case DoubleRepUse: {
2424             if (isClamped(type)) {
2425                 ASSERT(elementSize(type) == 1);
2426                 SpeculateDoubleOperand valueOp(this, valueUse);
2427                 GPRTemporary result(this);
2428                 FPRTemporary floatScratch(this);
2429                 FPRReg fpr = valueOp.fpr();
2430                 GPRReg gpr = result.gpr();
2431                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2432                 value.adopt(result);
2433                 valueGPR = gpr;
2434             } else {
2435                 SpeculateDoubleOperand valueOp(this, valueUse);
2436                 GPRTemporary result(this);
2437                 FPRReg fpr = valueOp.fpr();
2438                 GPRReg gpr = result.gpr();
2439                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2440                 m_jit.xorPtr(gpr, gpr);
2441                 MacroAssembler::Jump fixed = m_jit.jump();
2442                 notNaN.link(&m_jit);
2443                 
2444                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2445                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2446                 
2447                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2448                 
2449                 fixed.link(&m_jit);
2450                 value.adopt(result);
2451                 valueGPR = gpr;
2452             }
2453             break;
2454         }
2455             
2456         default:
2457             RELEASE_ASSERT_NOT_REACHED();
2458             break;
2459         }
2460     }
2461     
2462     ASSERT_UNUSED(valueGPR, valueGPR != property);
2463     ASSERT(valueGPR != base);
2464     ASSERT(valueGPR != storageReg);
2465     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2466     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2467         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2468         outOfBounds = MacroAssembler::Jump();
2469     }
2470
2471     switch (elementSize(type)) {
2472     case 1:
2473         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2474         break;
2475     case 2:
2476         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2477         break;
2478     case 4:
2479         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2480         break;
2481     default:
2482         CRASH();
2483     }
2484     if (outOfBounds.isSet())
2485         outOfBounds.link(&m_jit);
2486     noResult(node);
2487 }
2488
2489 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2490 {
2491     ASSERT(isFloat(type));
2492     
2493     SpeculateCellOperand base(this, node->child1());
2494     SpeculateStrictInt32Operand property(this, node->child2());
2495     StorageOperand storage(this, node->child3());
2496
2497     GPRReg baseReg = base.gpr();
2498     GPRReg propertyReg = property.gpr();
2499     GPRReg storageReg = storage.gpr();
2500
2501     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2502
2503     FPRTemporary result(this);
2504     FPRReg resultReg = result.fpr();
2505     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2506     switch (elementSize(type)) {
2507     case 4:
2508         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2509         m_jit.convertFloatToDouble(resultReg, resultReg);
2510         break;
2511     case 8: {
2512         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2513         break;
2514     }
2515     default:
2516         RELEASE_ASSERT_NOT_REACHED();
2517     }
2518     
2519     doubleResult(resultReg, node);
2520 }
2521
2522 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2523 {
2524     ASSERT(isFloat(type));
2525     
2526     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2527     GPRReg storageReg = storage.gpr();
2528     
2529     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2530     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2531
2532     SpeculateDoubleOperand valueOp(this, valueUse);
2533     FPRTemporary scratch(this);
2534     FPRReg valueFPR = valueOp.fpr();
2535     FPRReg scratchFPR = scratch.fpr();
2536
2537     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2538     
2539     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2540     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2541         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2542         outOfBounds = MacroAssembler::Jump();
2543     }
2544     
2545     switch (elementSize(type)) {
2546     case 4: {
2547         m_jit.moveDouble(valueFPR, scratchFPR);
2548         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2549         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2550         break;
2551     }
2552     case 8:
2553         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2554         break;
2555     default:
2556         RELEASE_ASSERT_NOT_REACHED();
2557     }
2558     if (outOfBounds.isSet())
2559         outOfBounds.link(&m_jit);
2560     noResult(node);
2561 }
2562
2563 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2564 {
2565     // Check that prototype is an object.
2566     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
2567     
2568     // Initialize scratchReg with the value being checked.
2569     m_jit.move(valueReg, scratchReg);
2570     
2571     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2572     MacroAssembler::Label loop(&m_jit);
2573     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2574     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2575     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2576 #if USE(JSVALUE64)
2577     branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2578 #else
2579     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2580 #endif
2581     
2582     // No match - result is false.
2583 #if USE(JSVALUE64)
2584     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2585 #else
2586     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2587 #endif
2588     MacroAssembler::Jump putResult = m_jit.jump();
2589     
2590     isInstance.link(&m_jit);
2591 #if USE(JSVALUE64)
2592     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2593 #else
2594     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2595 #endif
2596     
2597     putResult.link(&m_jit);
2598 }
2599
2600 void SpeculativeJIT::compileInstanceOf(Node* node)
2601 {
2602     if (node->child1().useKind() == UntypedUse) {
2603         // It might not be a cell. Speculate less aggressively.
2604         // Or: it might only be used once (i.e. by us), so we get zero benefit
2605         // from speculating any more aggressively than we absolutely need to.
2606         
2607         JSValueOperand value(this, node->child1());
2608         SpeculateCellOperand prototype(this, node->child2());
2609         GPRTemporary scratch(this);
2610         GPRTemporary scratch2(this);
2611         
2612         GPRReg prototypeReg = prototype.gpr();
2613         GPRReg scratchReg = scratch.gpr();
2614         GPRReg scratch2Reg = scratch2.gpr();
2615         
2616         MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
2617         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2618         moveFalseTo(scratchReg);
2619
2620         MacroAssembler::Jump done = m_jit.jump();
2621         
2622         isCell.link(&m_jit);
2623         
2624         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2625         
2626         done.link(&m_jit);
2627
2628         blessedBooleanResult(scratchReg, node);
2629         return;
2630     }
2631     
2632     SpeculateCellOperand value(this, node->child1());
2633     SpeculateCellOperand prototype(this, node->child2());
2634     
2635     GPRTemporary scratch(this);
2636     GPRTemporary scratch2(this);
2637     
2638     GPRReg valueReg = value.gpr();
2639     GPRReg prototypeReg = prototype.gpr();
2640     GPRReg scratchReg = scratch.gpr();
2641     GPRReg scratch2Reg = scratch2.gpr();
2642     
2643     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2644
2645     blessedBooleanResult(scratchReg, node);
2646 }
2647
2648 void SpeculativeJIT::compileAdd(Node* node)
2649 {
2650     switch (node->binaryUseKind()) {
2651     case Int32Use: {
2652         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2653         
2654         if (node->child1()->isInt32Constant()) {
2655             int32_t imm1 = node->child1()->asInt32();
2656             SpeculateInt32Operand op2(this, node->child2());
2657             GPRTemporary result(this);
2658
2659             if (!shouldCheckOverflow(node->arithMode())) {
2660                 m_jit.move(op2.gpr(), result.gpr());
2661                 m_jit.add32(Imm32(imm1), result.gpr());
2662             } else
2663                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2664
2665             int32Result(result.gpr(), node);
2666             return;
2667         }
2668         
2669         if (node->child2()->isInt32Constant()) {
2670             SpeculateInt32Operand op1(this, node->child1());
2671             int32_t imm2 = node->child2()->asInt32();
2672             GPRTemporary result(this);
2673                 
2674             if (!shouldCheckOverflow(node->arithMode())) {
2675                 m_jit.move(op1.gpr(), result.gpr());
2676                 m_jit.add32(Imm32(imm2), result.gpr());
2677             } else
2678                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2679
2680             int32Result(result.gpr(), node);
2681             return;
2682         }
2683                 
2684         SpeculateInt32Operand op1(this, node->child1());
2685         SpeculateInt32Operand op2(this, node->child2());
2686         GPRTemporary result(this, Reuse, op1, op2);
2687
2688         GPRReg gpr1 = op1.gpr();
2689         GPRReg gpr2 = op2.gpr();
2690         GPRReg gprResult = result.gpr();
2691
2692         if (!shouldCheckOverflow(node->arithMode())) {
2693             if (gpr1 == gprResult)
2694                 m_jit.add32(gpr2, gprResult);
2695             else {
2696                 m_jit.move(gpr2, gprResult);
2697                 m_jit.add32(gpr1, gprResult);
2698             }
2699         } else {
2700             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2701                 
2702             if (gpr1 == gprResult)
2703                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2704             else if (gpr2 == gprResult)
2705                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2706             else
2707                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2708         }
2709
2710         int32Result(gprResult, node);
2711         return;
2712     }
2713         
2714 #if USE(JSVALUE64)
2715     case Int52RepUse: {
2716         ASSERT(shouldCheckOverflow(node->arithMode()));
2717         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2718
2719         // Will we need an overflow check? If we can prove that neither input can be
2720         // Int52 then the overflow check will not be necessary.
2721         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2722             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2723             SpeculateWhicheverInt52Operand op1(this, node->child1());
2724             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2725             GPRTemporary result(this, Reuse, op1);
2726             m_jit.move(op1.gpr(), result.gpr());
2727             m_jit.add64(op2.gpr(), result.gpr());
2728             int52Result(result.gpr(), node, op1.format());
2729             return;
2730         }
2731         
2732         SpeculateInt52Operand op1(this, node->child1());
2733         SpeculateInt52Operand op2(this, node->child2());
2734         GPRTemporary result(this);
2735         m_jit.move(op1.gpr(), result.gpr());
2736         speculationCheck(
2737             Int52Overflow, JSValueRegs(), 0,
2738             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2739         int52Result(result.gpr(), node);
2740         return;
2741     }
2742 #endif // USE(JSVALUE64)
2743     
2744     case DoubleRepUse: {
2745         SpeculateDoubleOperand op1(this, node->child1());
2746         SpeculateDoubleOperand op2(this, node->child2());
2747         FPRTemporary result(this, op1, op2);
2748
2749         FPRReg reg1 = op1.fpr();
2750         FPRReg reg2 = op2.fpr();
2751         m_jit.addDouble(reg1, reg2, result.fpr());
2752
2753         doubleResult(result.fpr(), node);
2754         return;
2755     }
2756         
2757     default:
2758         RELEASE_ASSERT_NOT_REACHED();
2759         break;
2760     }
2761 }
2762
2763 void SpeculativeJIT::compileMakeRope(Node* node)
2764 {
2765     ASSERT(node->child1().useKind() == KnownStringUse);
2766     ASSERT(node->child2().useKind() == KnownStringUse);
2767     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2768     
2769     SpeculateCellOperand op1(this, node->child1());
2770     SpeculateCellOperand op2(this, node->child2());
2771     SpeculateCellOperand op3(this, node->child3());
2772     GPRTemporary result(this);
2773     GPRTemporary allocator(this);
2774     GPRTemporary scratch(this);
2775     
2776     GPRReg opGPRs[3];
2777     unsigned numOpGPRs;
2778     opGPRs[0] = op1.gpr();
2779     opGPRs[1] = op2.gpr();
2780     if (node->child3()) {
2781         opGPRs[2] = op3.gpr();
2782         numOpGPRs = 3;
2783     } else {
2784         opGPRs[2] = InvalidGPRReg;
2785         numOpGPRs = 2;
2786     }
2787     GPRReg resultGPR = result.gpr();
2788     GPRReg allocatorGPR = allocator.gpr();
2789     GPRReg scratchGPR = scratch.gpr();
2790     
2791     JITCompiler::JumpList slowPath;
2792     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2793     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2794     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2795         
2796     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2797     for (unsigned i = 0; i < numOpGPRs; ++i)
2798         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2799     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2800         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2801     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2802     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2803     if (!ASSERT_DISABLED) {
2804         JITCompiler::Jump ok = m_jit.branch32(
2805             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2806         m_jit.abortWithReason(DFGNegativeStringLength);
2807         ok.link(&m_jit);
2808     }
2809     for (unsigned i = 1; i < numOpGPRs; ++i) {
2810         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2811         speculationCheck(
2812             Uncountable, JSValueSource(), nullptr,
2813             m_jit.branchAdd32(
2814                 JITCompiler::Overflow,
2815                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
2816     }
2817     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2818     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2819     if (!ASSERT_DISABLED) {
2820         JITCompiler::Jump ok = m_jit.branch32(
2821             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
2822         m_jit.abortWithReason(DFGNegativeStringLength);
2823         ok.link(&m_jit);
2824     }
2825     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2826     
2827     switch (numOpGPRs) {
2828     case 2:
2829         addSlowPathGenerator(slowPathCall(
2830             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2831         break;
2832     case 3:
2833         addSlowPathGenerator(slowPathCall(
2834             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2835         break;
2836     default:
2837         RELEASE_ASSERT_NOT_REACHED();
2838         break;
2839     }
2840         
2841     cellResult(resultGPR, node);
2842 }
2843
2844 void SpeculativeJIT::compileArithSub(Node* node)
2845 {
2846     switch (node->binaryUseKind()) {
2847     case Int32Use: {
2848         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2849         
2850         if (node->child2()->isNumberConstant()) {
2851             SpeculateInt32Operand op1(this, node->child1());
2852             int32_t imm2 = node->child2()->asInt32();
2853             GPRTemporary result(this);
2854
2855             if (!shouldCheckOverflow(node->arithMode())) {
2856                 m_jit.move(op1.gpr(), result.gpr());
2857                 m_jit.sub32(Imm32(imm2), result.gpr());
2858             } else {
2859                 GPRTemporary scratch(this);
2860                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2861             }
2862
2863             int32Result(result.gpr(), node);
2864             return;
2865         }
2866             
2867         if (node->child1()->isNumberConstant()) {
2868             int32_t imm1 = node->child1()->asInt32();
2869             SpeculateInt32Operand op2(this, node->child2());
2870             GPRTemporary result(this);
2871                 
2872             m_jit.move(Imm32(imm1), result.gpr());
2873             if (!shouldCheckOverflow(node->arithMode()))
2874                 m_jit.sub32(op2.gpr(), result.gpr());
2875             else
2876                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2877                 
2878             int32Result(result.gpr(), node);
2879             return;
2880         }
2881             
2882         SpeculateInt32Operand op1(this, node->child1());
2883         SpeculateInt32Operand op2(this, node->child2());
2884         GPRTemporary result(this);
2885
2886         if (!shouldCheckOverflow(node->arithMode())) {
2887             m_jit.move(op1.gpr(), result.gpr());
2888             m_jit.sub32(op2.gpr(), result.gpr());
2889         } else
2890             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2891
2892         int32Result(result.gpr(), node);
2893         return;
2894     }
2895         
2896 #if USE(JSVALUE64)
2897     case Int52RepUse: {
2898         ASSERT(shouldCheckOverflow(node->arithMode()));
2899         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2900
2901         // Will we need an overflow check? If we can prove that neither input can be
2902         // Int52 then the overflow check will not be necessary.
2903         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2904             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2905             SpeculateWhicheverInt52Operand op1(this, node->child1());
2906             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2907             GPRTemporary result(this, Reuse, op1);
2908             m_jit.move(op1.gpr(), result.gpr());
2909             m_jit.sub64(op2.gpr(), result.gpr());
2910             int52Result(result.gpr(), node, op1.format());
2911             return;
2912         }
2913         
2914         SpeculateInt52Operand op1(this, node->child1());
2915         SpeculateInt52Operand op2(this, node->child2());
2916         GPRTemporary result(this);
2917         m_jit.move(op1.gpr(), result.gpr());
2918         speculationCheck(
2919             Int52Overflow, JSValueRegs(), 0,
2920             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2921         int52Result(result.gpr(), node);
2922         return;
2923     }
2924 #endif // USE(JSVALUE64)
2925
2926     case DoubleRepUse: {
2927         SpeculateDoubleOperand op1(this, node->child1());
2928         SpeculateDoubleOperand op2(this, node->child2());
2929         FPRTemporary result(this, op1);
2930
2931         FPRReg reg1 = op1.fpr();
2932         FPRReg reg2 = op2.fpr();
2933         m_jit.subDouble(reg1, reg2, result.fpr());
2934
2935         doubleResult(result.fpr(), node);
2936         return;
2937     }
2938         
2939     default:
2940         RELEASE_ASSERT_NOT_REACHED();
2941         return;
2942     }
2943 }
2944
2945 void SpeculativeJIT::compileArithNegate(Node* node)
2946 {
2947     switch (node->child1().useKind()) {
2948     case Int32Use: {
2949         SpeculateInt32Operand op1(this, node->child1());
2950         GPRTemporary result(this);
2951
2952         m_jit.move(op1.gpr(), result.gpr());
2953
2954         // Note: there is no notion of being not used as a number, but someone
2955         // caring about negative zero.
2956         
2957         if (!shouldCheckOverflow(node->arithMode()))
2958             m_jit.neg32(result.gpr());
2959         else if (!shouldCheckNegativeZero(node->arithMode()))
2960             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
2961         else {
2962             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2963             m_jit.neg32(result.gpr());
2964         }
2965
2966         int32Result(result.gpr(), node);
2967         return;
2968     }
2969
2970 #if USE(JSVALUE64)
2971     case Int52RepUse: {
2972         ASSERT(shouldCheckOverflow(node->arithMode()));
2973         
2974         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2975             SpeculateWhicheverInt52Operand op1(this, node->child1());
2976             GPRTemporary result(this);
2977             GPRReg op1GPR = op1.gpr();
2978             GPRReg resultGPR = result.gpr();
2979             m_jit.move(op1GPR, resultGPR);
2980             m_jit.neg64(resultGPR);
2981             if (shouldCheckNegativeZero(node->arithMode())) {
2982                 speculationCheck(
2983                     NegativeZero, JSValueRegs(), 0,
2984                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2985             }
2986             int52Result(resultGPR, node, op1.format());
2987             return;
2988         }
2989         
2990         SpeculateInt52Operand op1(this, node->child1());
2991         GPRTemporary result(this);
2992         GPRReg op1GPR = op1.gpr();
2993         GPRReg resultGPR = result.gpr();
2994         m_jit.move(op1GPR, resultGPR);
2995         speculationCheck(
2996             Int52Overflow, JSValueRegs(), 0,
2997             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2998         if (shouldCheckNegativeZero(node->arithMode())) {
2999             speculationCheck(
3000                 NegativeZero, JSValueRegs(), 0,
3001                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3002         }
3003         int52Result(resultGPR, node);
3004         return;
3005     }
3006 #endif // USE(JSVALUE64)
3007         
3008     case DoubleRepUse: {
3009         SpeculateDoubleOperand op1(this, node->child1());
3010         FPRTemporary result(this);
3011         
3012         m_jit.negateDouble(op1.fpr(), result.fpr());
3013         
3014         doubleResult(result.fpr(), node);
3015         return;
3016     }
3017         
3018     default:
3019         RELEASE_ASSERT_NOT_REACHED();
3020         return;
3021     }
3022 }
3023 void SpeculativeJIT::compileArithMul(Node* node)
3024 {
3025     switch (node->binaryUseKind()) {
3026     case Int32Use: {
3027         SpeculateInt32Operand op1(this, node->child1());
3028         SpeculateInt32Operand op2(this, node->child2());
3029         GPRTemporary result(this);
3030
3031         GPRReg reg1 = op1.gpr();
3032         GPRReg reg2 = op2.gpr();
3033
3034         // We can perform truncated multiplications if we get to this point, because if the
3035         // fixup phase could not prove that it would be safe, it would have turned us into
3036         // a double multiplication.
3037         if (!shouldCheckOverflow(node->arithMode())) {
3038             m_jit.move(reg1, result.gpr());
3039             m_jit.mul32(reg2, result.gpr());
3040         } else {
3041             speculationCheck(
3042                 Overflow, JSValueRegs(), 0,
3043                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3044         }
3045             
3046         // Check for negative zero, if the users of this node care about such things.
3047         if (shouldCheckNegativeZero(node->arithMode())) {
3048             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3049             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3050             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3051             resultNonZero.link(&m_jit);
3052         }
3053
3054         int32Result(result.gpr(), node);
3055         return;
3056     }
3057     
3058 #if USE(JSVALUE64)   
3059     case Int52RepUse: {
3060         ASSERT(shouldCheckOverflow(node->arithMode()));
3061         
3062         // This is super clever. We want to do an int52 multiplication and check the
3063         // int52 overflow bit. There is no direct hardware support for this, but we do
3064         // have the ability to do an int64 multiplication and check the int64 overflow
3065         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3066         // registers, with the high 12 bits being sign-extended. We can do:
3067         //
3068         //     (a * (b << 12))
3069         //
3070         // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3071         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3072         // multiplication overflows is identical to whether the 'a * b' 52-bit
3073         // multiplication overflows.
3074         //
3075         // In our nomenclature, this is:
3076         //
3077         //     strictInt52(a) * int52(b) => int52
3078         //
3079         // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3080         // bits.
3081         //
3082         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3083         // we just do whatever is more convenient for op1 and have op2 do the
3084         // opposite. This ensures that we do at most one shift.
3085
3086         SpeculateWhicheverInt52Operand op1(this, node->child1());
3087         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3088         GPRTemporary result(this);
3089         
3090         GPRReg op1GPR = op1.gpr();
3091         GPRReg op2GPR = op2.gpr();
3092         GPRReg resultGPR = result.gpr();
3093         
3094         m_jit.move(op1GPR, resultGPR);
3095         speculationCheck(
3096             Int52Overflow, JSValueRegs(), 0,
3097             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3098         
3099         if (shouldCheckNegativeZero(node->arithMode())) {
3100             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3101                 MacroAssembler::NonZero, resultGPR);
3102             speculationCheck(
3103                 NegativeZero, JSValueRegs(), 0,
3104                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3105             speculationCheck(
3106                 NegativeZero, JSValueRegs(), 0,
3107                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3108             resultNonZero.link(&m_jit);
3109         }
3110         
3111         int52Result(resultGPR, node);
3112         return;
3113     }
3114 #endif // USE(JSVALUE64)
3115         
3116     case DoubleRepUse: {
3117         SpeculateDoubleOperand op1(this, node->child1());
3118         SpeculateDoubleOperand op2(this, node->child2());
3119         FPRTemporary result(this, op1, op2);
3120         
3121         FPRReg reg1 = op1.fpr();
3122         FPRReg reg2 = op2.fpr();
3123         
3124         m_jit.mulDouble(reg1, reg2, result.fpr());
3125         
3126         doubleResult(result.fpr(), node);
3127         return;
3128     }
3129         
3130     default:
3131         RELEASE_ASSERT_NOT_REACHED();
3132         return;
3133     }
3134 }
3135
3136 void SpeculativeJIT::compileArithDiv(Node* node)
3137 {
3138     switch (node->binaryUseKind()) {
3139     case Int32Use: {
3140 #if CPU(X86) || CPU(X86_64)
3141         SpeculateInt32Operand op1(this, node->child1());
3142         SpeculateInt32Operand op2(this, node->child2());
3143         GPRTemporary eax(this, X86Registers::eax);
3144         GPRTemporary edx(this, X86Registers::edx);
3145         GPRReg op1GPR = op1.gpr();
3146         GPRReg op2GPR = op2.gpr();
3147     
3148         GPRReg op2TempGPR;
3149         GPRReg temp;
3150         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3151             op2TempGPR = allocate();
3152             temp = op2TempGPR;
3153         } else {
3154             op2TempGPR = InvalidGPRReg;
3155             if (op1GPR == X86Registers::eax)
3156                 temp = X86Registers::edx;
3157             else
3158                 temp = X86Registers::eax;
3159         }
3160     
3161         ASSERT(temp != op1GPR);
3162         ASSERT(temp != op2GPR);
3163     
3164         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3165     
3166         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3167     
3168         JITCompiler::JumpList done;
3169         if (shouldCheckOverflow(node->arithMode())) {
3170             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3171             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3172         } else {
3173             // This is the case where we convert the result to an int after we're done, and we
3174             // already know that the denominator is either -1 or 0. So, if the denominator is
3175             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3176             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3177             // are happy to fall through to a normal division, since we're just dividing
3178             // something by negative 1.
3179         
3180             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3181             m_jit.move(TrustedImm32(0), eax.gpr());
3182             done.append(m_jit.jump());
3183         
3184             notZero.link(&m_jit);
3185             JITCompiler::Jump notNeg2ToThe31 =
3186                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3187             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3188             done.append(m_jit.jump());
3189         
3190             notNeg2ToThe31.link(&m_jit);
3191         }
3192     
3193         safeDenominator.link(&m_jit);
3194     
3195         // If the user cares about negative zero, then speculate that we're not about
3196         // to produce negative zero.
3197         if (shouldCheckNegativeZero(node->arithMode())) {
3198             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3199             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3200             numeratorNonZero.link(&m_jit);
3201         }
3202     
3203         if (op2TempGPR != InvalidGPRReg) {
3204             m_jit.move(op2GPR, op2TempGPR);
3205             op2GPR = op2TempGPR;
3206         }
3207             
3208         m_jit.move(op1GPR, eax.gpr());
3209         m_jit.assembler().cdq();
3210         m_jit.assembler().idivl_r(op2GPR);
3211             
3212         if (op2TempGPR != InvalidGPRReg)
3213             unlock(op2TempGPR);
3214
3215         // Check that there was no remainder. If there had been, then we'd be obligated to
3216         // produce a double result instead.
3217         if (shouldCheckOverflow(node->arithMode()))
3218             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3219         
3220         done.link(&m_jit);
3221         int32Result(eax.gpr(), node);
3222 #elif CPU(APPLE_ARMV7S) || CPU(ARM64)
3223         SpeculateInt32Operand op1(this, node->child1());
3224         SpeculateInt32Operand op2(this, node->child2());
3225         GPRReg op1GPR = op1.gpr();
3226         GPRReg op2GPR = op2.gpr();
3227         GPRTemporary quotient(this);
3228         GPRTemporary multiplyAnswer(this);
3229
3230         // If the user cares about negative zero, then speculate that we're not about
3231         // to produce negative zero.
3232         if (shouldCheckNegativeZero(node->arithMode())) {
3233             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3234             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3235             numeratorNonZero.link(&m_jit);
3236         }
3237
3238         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3239
3240         // Check that there was no remainder. If there had been, then we'd be obligated to
3241         // produce a double result instead.
3242         if (shouldCheckOverflow(node->arithMode())) {
3243             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3244             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3245         }
3246
3247         int32Result(quotient.gpr(), node);
3248 #else
3249         RELEASE_ASSERT_NOT_REACHED();
3250 #endif
3251         break;
3252     }
3253         
3254     case DoubleRepUse: {
3255         SpeculateDoubleOperand op1(this, node->child1());
3256         SpeculateDoubleOperand op2(this, node->child2());
3257         FPRTemporary result(this, op1);
3258         
3259         FPRReg reg1 = op1.fpr();
3260         FPRReg reg2 = op2.fpr();
3261         m_jit.divDouble(reg1, reg2, result.fpr());
3262         
3263         doubleResult(result.fpr(), node);
3264         break;
3265     }
3266         
3267     default:
3268         RELEASE_ASSERT_NOT_REACHED();
3269         break;
3270     }
3271 }
3272
3273 void SpeculativeJIT::compileArithMod(Node* node)
3274 {
3275     switch (node->binaryUseKind()) {
3276     case Int32Use: {
3277         // In the fast path, the dividend value could be the final result
3278         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3279         SpeculateStrictInt32Operand op1(this, node->child1());
3280         
3281         if (node->child2()->isInt32Constant()) {
3282             int32_t divisor = node->child2()->asInt32();
3283             if (divisor > 1 && hasOneBitSet(divisor)) {
3284                 unsigned logarithm = WTF::fastLog2(divisor);
3285                 GPRReg dividendGPR = op1.gpr();
3286                 GPRTemporary result(this);
3287                 GPRReg resultGPR = result.gpr();
3288
3289                 // This is what LLVM generates. It's pretty crazy. Here's my
3290                 // attempt at understanding it.
3291                 
3292                 // First, compute either divisor - 1, or 0, depending on whether
3293                 // the dividend is negative:
3294                 //
3295                 // If dividend < 0:  resultGPR = divisor - 1
3296                 // If dividend >= 0: resultGPR = 0
3297                 m_jit.move(dividendGPR, resultGPR);
3298                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3299                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3300                 
3301                 // Add in the dividend, so that:
3302                 //
3303                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3304                 // If dividend >= 0: resultGPR = dividend
3305                 m_jit.add32(dividendGPR, resultGPR);
3306                 
3307                 // Mask so as to only get the *high* bits. This rounds down
3308                 // (towards negative infinity) resultGPR to the nearest multiple
3309                 // of divisor, so that:
3310                 //
3311                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3312                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3313                 //
3314                 // Note that this can be simplified to:
3315                 //
3316                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3317                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3318                 //
3319                 // Note that if the dividend is negative, resultGPR will also be negative.
3320                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3321                 // zero, because of how things are conditionalized.
3322                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3323                 
3324                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3325                 //
3326                 // resultGPR = dividendGPR - resultGPR
3327                 m_jit.neg32(resultGPR);
3328                 m_jit.add32(dividendGPR, resultGPR);
3329                 
3330                 if (shouldCheckNegativeZero(node->arithMode())) {
3331                     // Check that we're not about to create negative zero.
3332                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3333                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3334                     numeratorPositive.link(&m_jit);
3335                 }
3336
3337                 int32Result(resultGPR, node);
3338                 return;
3339             }
3340         }
3341         
3342 #if CPU(X86) || CPU(X86_64)
3343         if (node->child2()->isInt32Constant()) {
3344             int32_t divisor = node->child2()->asInt32();
3345             if (divisor && divisor != -1) {
3346                 GPRReg op1Gpr = op1.gpr();
3347
3348                 GPRTemporary eax(this, X86Registers::eax);
3349                 GPRTemporary edx(this, X86Registers::edx);
3350                 GPRTemporary scratch(this);
3351                 GPRReg scratchGPR = scratch.gpr();
3352
3353                 GPRReg op1SaveGPR;
3354                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3355                     op1SaveGPR = allocate();
3356                     ASSERT(op1Gpr != op1SaveGPR);
3357                     m_jit.move(op1Gpr, op1SaveGPR);
3358                 } else
3359                     op1SaveGPR = op1Gpr;
3360                 ASSERT(op1SaveGPR != X86Registers::eax);
3361                 ASSERT(op1SaveGPR != X86Registers::edx);
3362
3363                 m_jit.move(op1Gpr, eax.gpr());
3364                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3365                 m_jit.assembler().cdq();
3366                 m_jit.assembler().idivl_r(scratchGPR);
3367                 if (shouldCheckNegativeZero(node->arithMode())) {
3368                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3369                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3370                     numeratorPositive.link(&m_jit);
3371                 }
3372             
3373                 if (op1SaveGPR != op1Gpr)
3374                     unlock(op1SaveGPR);
3375
3376                 int32Result(edx.gpr(), node);
3377                 return;
3378             }
3379         }
3380 #endif
3381
3382         SpeculateInt32Operand op2(this, node->child2());
3383 #if CPU(X86) || CPU(X86_64)
3384         GPRTemporary eax(this, X86Registers::eax);
3385         GPRTemporary edx(this, X86Registers::edx);
3386         GPRReg op1GPR = op1.gpr();
3387         GPRReg op2GPR = op2.gpr();
3388     
3389         GPRReg op2TempGPR;
3390         GPRReg temp;
3391         GPRReg op1SaveGPR;
3392     
3393         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3394             op2TempGPR = allocate();
3395             temp = op2TempGPR;
3396         } else {
3397             op2TempGPR = InvalidGPRReg;
3398             if (op1GPR == X86Registers::eax)
3399                 temp = X86Registers::edx;
3400             else
3401                 temp = X86Registers::eax;
3402         }
3403     
3404         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3405             op1SaveGPR = allocate();
3406             ASSERT(op1GPR != op1SaveGPR);
3407             m_jit.move(op1GPR, op1SaveGPR);
3408         } else
3409             op1SaveGPR = op1GPR;
3410     
3411         ASSERT(temp != op1GPR);
3412         ASSERT(temp != op2GPR);
3413         ASSERT(op1SaveGPR != X86Registers::eax);
3414         ASSERT(op1SaveGPR != X86Registers::edx);
3415     
3416         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3417     
3418         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3419     
3420         JITCompiler::JumpList done;
3421         
3422         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3423         // separate case for that. But it probably doesn't matter so much.
3424         if (shouldCheckOverflow(node->arithMode())) {
3425             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3426             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3427         } else {
3428             // This is the case where we convert the result to an int after we're done, and we
3429             // already know that the denominator is either -1 or 0. So, if the denominator is
3430             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3431             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3432             // happy to fall through to a normal division, since we're just dividing something
3433             // by negative 1.
3434         
3435             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3436             m_jit.move(TrustedImm32(0), edx.gpr());
3437             done.append(m_jit.jump());
3438         
3439             notZero.link(&m_jit);
3440             JITCompiler::Jump notNeg2ToThe31 =
3441                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3442             m_jit.move(TrustedImm32(0), edx.gpr());
3443             done.append(m_jit.jump());
3444         
3445             notNeg2ToThe31.link(&m_jit);
3446         }
3447         
3448         safeDenominator.link(&m_jit);
3449             
3450         if (op2TempGPR != InvalidGPRReg) {
3451             m_jit.move(op2GPR, op2TempGPR);
3452             op2GPR = op2TempGPR;
3453         }
3454             
3455         m_jit.move(op1GPR, eax.gpr());
3456         m_jit.assembler().cdq();
3457         m_jit.assembler().idivl_r(op2GPR);
3458             
3459         if (op2TempGPR != InvalidGPRReg)
3460             unlock(op2TempGPR);
3461
3462         // Check that we're not about to create negative zero.
3463         if (shouldCheckNegativeZero(node->arithMode())) {
3464             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3465             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3466             numeratorPositive.link(&m_jit);
3467         }
3468     
3469         if (op1SaveGPR != op1GPR)
3470             unlock(op1SaveGPR);
3471             
3472         done.link(&m_jit);
3473         int32Result(edx.gpr(), node);
3474
3475 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3476         GPRTemporary temp(this);
3477         GPRTemporary quotientThenRemainder(this);
3478         GPRTemporary multiplyAnswer(this);
3479         GPRReg dividendGPR = op1.gpr();
3480         GPRReg divisorGPR = op2.gpr();
3481         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3482         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3483
3484         JITCompiler::JumpList done;
3485     
3486         if (shouldCheckOverflow(node->arithMode()))
3487             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3488         else {
3489             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3490             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3491             done.append(m_jit.jump());
3492             denominatorNotZero.link(&m_jit);
3493         }
3494
3495         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3496         // FIXME: It seems like there are cases where we don't need this? What if we have
3497         // arithMode() == Arith::Unchecked?
3498         // https://bugs.webkit.org/show_bug.cgi?id=126444
3499         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3500 #if CPU(APPLE_ARMV7S)
3501         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3502 #else
3503         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3504 #endif
3505
3506         // If the user cares about negative zero, then speculate that we're not about
3507         // to produce negative zero.
3508         if (shouldCheckNegativeZero(node->arithMode())) {
3509             // Check that we're not about to create negative zero.
3510             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3511             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3512             numeratorPositive.link(&m_jit);
3513         }
3514
3515         done.link(&m_jit);
3516
3517         int32Result(quotientThenRemainderGPR, node);
3518 #else // not architecture that can do integer division
3519         RELEASE_ASSERT_NOT_REACHED();
3520 #endif
3521         return;
3522     }
3523         
3524     case DoubleRepUse: {
3525         SpeculateDoubleOperand op1(this, node->child1());
3526         SpeculateDoubleOperand op2(this, node->child2());
3527         
3528         FPRReg op1FPR = op1.fpr();
3529         FPRReg op2FPR = op2.fpr();
3530         
3531         flushRegisters();
3532         
3533         FPRResult result(this);
3534         
3535         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3536         
3537         doubleResult(result.fpr(), node);
3538         return;
3539     }
3540         
3541     default:
3542         RELEASE_ASSERT_NOT_REACHED();
3543         return;
3544     }
3545 }
3546
3547 // Returns true if the compare is fused with a subsequent branch.
3548 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3549 {
3550     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3551         return true;
3552
3553     if (node->isBinaryUseKind(Int32Use)) {
3554         compileInt32Compare(node, condition);
3555         return false;
3556     }
3557     
3558 #if USE(JSVALUE64)
3559     if (node->isBinaryUseKind(Int52RepUse)) {
3560         compileInt52Compare(node, condition);
3561         return false;
3562     }
3563 #endif // USE(JSVALUE64)
3564     
3565     if (node->isBinaryUseKind(DoubleRepUse)) {
3566         compileDoubleCompare(node, doubleCondition);
3567         return false;
3568     }
3569     
3570     if (node->op() == CompareEq) {
3571         if (node->isBinaryUseKind(StringUse)) {
3572             compileStringEquality(node);
3573             return false;
3574         }
3575         
3576         if (node->isBinaryUseKind(BooleanUse)) {
3577             compileBooleanCompare(node, condition);
3578             return false;
3579         }
3580
3581         if (node->isBinaryUseKind(StringIdentUse)) {
3582             compileStringIdentEquality(node);
3583             return false;
3584         }
3585         
3586         if (node->isBinaryUseKind(ObjectUse)) {
3587             compileObjectEquality(node);
3588             return false;
3589         }
3590         
3591         if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
3592             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3593             return false;
3594         }
3595         
3596         if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
3597             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3598             return false;
3599         }
3600     }
3601     
3602     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3603     return false;
3604 }
3605
3606 bool SpeculativeJIT::compileStrictEq(Node* node)
3607 {
3608     if (node->isBinaryUseKind(BooleanUse)) {
3609         unsigned branchIndexInBlock = detectPeepHoleBranch();
3610         if (branchIndexInBlock != UINT_MAX) {
3611             Node* branchNode = m_block->at(branchIndexInBlock);
3612             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3613             use(node->child1());
3614             use(node->child2());
3615             m_indexInBlock = branchIndexInBlock;
3616             m_currentNode = branchNode;
3617             return true;
3618         }
3619         compileBooleanCompare(node, MacroAssembler::Equal);
3620         return false;
3621     }
3622
3623     if (node->isBinaryUseKind(Int32Use)) {
3624         unsigned branchIndexInBlock = detectPeepHoleBranch();
3625         if (branchIndexInBlock != UINT_MAX) {
3626             Node* branchNode = m_block->at(branchIndexInBlock);
3627             compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3628             use(node->child1());
3629             use(node->child2());
3630             m_indexInBlock = branchIndexInBlock;
3631             m_currentNode = branchNode;
3632             return true;
3633         }
3634         compileInt32Compare(node, MacroAssembler::Equal);
3635         return false;
3636     }
3637     
3638 #if USE(JSVALUE64)   
3639     if (node->isBinaryUseKind(Int52RepUse)) {
3640         unsigned branchIndexInBlock = detectPeepHoleBranch();
3641         if (branchIndexInBlock != UINT_MAX) {
3642             Node* branchNode = m_block->at(branchIndexInBlock);
3643             compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3644             use(node->child1());
3645             use(node->child2());
3646             m_indexInBlock = branchIndexInBlock;
3647             m_currentNode = branchNode;
3648             return true;
3649         }
3650         compileInt52Compare(node, MacroAssembler::Equal);
3651         return false;
3652     }
3653 #endif // USE(JSVALUE64)
3654
3655     if (node->isBinaryUseKind(DoubleRepUse)) {
3656         unsigned branchIndexInBlock = detectPeepHoleBranch();
3657         if (branchIndexInBlock != UINT_MAX) {
3658             Node* branchNode = m_block->at(branchIndexInBlock);
3659             compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3660             use(node->child1());
3661             use(node->child2());
3662             m_indexInBlock = branchIndexInBlock;
3663             m_currentNode = branchNode;
3664             return true;
3665         }
3666         compileDoubleCompare(node, MacroAssembler::DoubleEqual);
3667         return false;
3668     }
3669     
3670     if (node->isBinaryUseKind(StringUse)) {
3671         compileStringEquality(node);
3672         return false;
3673     }
3674     
3675     if (node->isBinaryUseKind(StringIdentUse)) {
3676         compileStringIdentEquality(node);
3677         return false;
3678     }
3679
3680     if (node->isBinaryUseKind(ObjectUse)) {
3681         unsigned branchIndexInBlock = detectPeepHoleBranch();
3682         if (branchIndexInBlock != UINT_MAX) {
3683             Node* branchNode = m_block->at(branchIndexInBlock);
3684             compilePeepHoleObjectEquality(node, branchNode);
3685             use(node->child1());
3686             use(node->child2());
3687             m_indexInBlock = branchIndexInBlock;
3688             m_currentNode = branchNode;
3689             return true;
3690         }
3691         compileObjectEquality(node);
3692         return false;
3693     }
3694
3695     if (node->isBinaryUseKind(MiscUse, UntypedUse)
3696         || node->isBinaryUseKind(UntypedUse, MiscUse)) {
3697         compileMiscStrictEq(node);
3698         return false;
3699     }
3700     
3701     if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
3702         compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
3703         return false;
3704     }
3705     
3706     if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
3707         compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
3708         return false;
3709     }
3710     
3711     if (node->isBinaryUseKind(StringUse, UntypedUse)) {
3712         compileStringToUntypedEquality(node, node->child1(), node->child2());
3713         return false;
3714     }
3715     
3716     if (node->isBinaryUseKind(UntypedUse, StringUse)) {
3717         compileStringToUntypedEquality(node, node->child2(), node->child1());
3718         return false;
3719     }
3720     
3721     RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
3722     return nonSpeculativeStrictEq(node);
3723 }
3724
3725 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3726 {
3727     SpeculateBooleanOperand op1(this, node->child1());
3728     SpeculateBooleanOperand op2(this, node->child2());
3729     GPRTemporary result(this);
3730     
3731     m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3732     
3733     unblessedBooleanResult(result.gpr(), node);
3734 }
3735
3736 void SpeculativeJIT::compileStringEquality(
3737     Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
3738     GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
3739     JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
3740 {
3741     JITCompiler::JumpList trueCase;
3742     JITCompiler::JumpList falseCase;
3743     JITCompiler::JumpList slowCase;
3744     
3745     trueCase.append(fastTrue);
3746     falseCase.append(fastFalse);
3747
3748     m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
3749     
3750     falseCase.append(m_jit.branch32(
3751         MacroAssembler::NotEqual,
3752         MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
3753         lengthGPR));
3754     
3755     trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
3756     
3757     m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
3758     m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
3759     
3760     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
3761     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
3762     
3763     slowCase.append(m_jit.branchTest32(
3764         MacroAssembler::Zero,
3765         MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
3766         TrustedImm32(StringImpl::flagIs8Bit())));
3767     slowCase.append(m_jit.branchTest32(
3768         MacroAssembler::Zero,
3769         MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
3770         TrustedImm32(StringImpl::flagIs8Bit())));
3771     
3772     m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
3773     m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
3774     
3775     MacroAssembler::Label loop = m_jit.label();
3776     
3777     m_jit.sub32(TrustedImm32(1), lengthGPR);
3778
3779     // This isn't going to generate the best code on x86. But that's OK, it's still better
3780     // than not inlining.
3781     m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
3782     m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
3783     falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
3784     
3785     m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
3786     
3787     trueCase.link(&m_jit);
3788     moveTrueTo(leftTempGPR);
3789     
3790     JITCompiler::Jump done = m_jit.jump();
3791
3792     falseCase.link(&m_jit);
3793     moveFalseTo(leftTempGPR);
3794