WebKit-https.git: Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (revision 08744a25c3de10f9371a9ab0f3de927d8e240b27)
1 /*
2  * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27
28 #if ENABLE(DFG_JIT)
29
30 #include "DFGSpeculativeJIT.h"
31
32 #include "Arguments.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGArrayifySlowPathGenerator.h"
35 #include "DFGBinarySwitch.h"
36 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
44
45 namespace JSC { namespace DFG {
46
47 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
48     : m_compileOkay(true)
49     , m_jit(jit)
50     , m_currentNode(0)
51     , m_indexInBlock(0)
52     , m_generationInfo(m_jit.graph().frameRegisterCount())
53     , m_state(m_jit.graph())
54     , m_interpreter(m_jit.graph(), m_state)
55     , m_stream(&jit.jitCode()->variableEventStream)
56     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
57     , m_isCheckingArgumentTypes(false)
58 {
59 }
60
61 SpeculativeJIT::~SpeculativeJIT()
62 {
63 }
64
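// Inline fast path for allocating a JSArray together with its butterfly: the vector is
// sized to at least BASE_VECTOR_LEN, unused slots of double arrays are pre-filled with
// QNaN (the hole representation), and allocation failure falls through to
// operationNewArrayWithSize via the CallArrayAllocatorSlowPathGenerator below.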
65 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
66 {
67     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
68     
69     GPRTemporary scratch(this);
70     GPRTemporary scratch2(this);
71     GPRReg scratchGPR = scratch.gpr();
72     GPRReg scratch2GPR = scratch2.gpr();
73     
74     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
75     
76     JITCompiler::JumpList slowCases;
77     
78     slowCases.append(
79         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
80     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
81     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
82     
83     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
84     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
85     
86     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
87 #if USE(JSVALUE64)
88         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
89         for (unsigned i = numElements; i < vectorLength; ++i)
90             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
91 #else
92         EncodedValueDescriptor value;
93         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
94         for (unsigned i = numElements; i < vectorLength; ++i) {
95             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
96             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
97         }
98 #endif
99     }
100     
101     // I want a slow path that also loads out the storage pointer, and that's
102     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
103     // of work for a very small piece of functionality. :-/
104     addSlowPathGenerator(adoptPtr(
105         new CallArrayAllocatorSlowPathGenerator(
106             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
107             structure, numElements)));
108 }
109
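// Each speculationCheck() records an OSR exit tied to the given jump(s): if the check
// fails at run time, execution leaves the DFG code with the value-profile and
// variable-event information captured here. All overloads become no-ops once
// m_compileOkay has been cleared.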
110 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
111 {
112     if (!m_compileOkay)
113         return;
114     ASSERT(m_isCheckingArgumentTypes || m_canExit);
115     m_jit.appendExitInfo(jumpToFail);
116     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
117 }
118
119 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
120 {
121     if (!m_compileOkay)
122         return;
123     ASSERT(m_isCheckingArgumentTypes || m_canExit);
124     m_jit.appendExitInfo(jumpsToFail);
125     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
126 }
127
128 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
129 {
130     if (!m_compileOkay)
131         return OSRExitJumpPlaceholder();
132     ASSERT(m_isCheckingArgumentTypes || m_canExit);
133     unsigned index = m_jit.jitCode()->osrExit.size();
134     m_jit.appendExitInfo();
135     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
136     return OSRExitJumpPlaceholder(index);
137 }
138
139 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
140 {
141     ASSERT(m_isCheckingArgumentTypes || m_canExit);
142     return speculationCheck(kind, jsValueSource, nodeUse.node());
143 }
144
145 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
146 {
147     ASSERT(m_isCheckingArgumentTypes || m_canExit);
148     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
149 }
150
151 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
152 {
153     ASSERT(m_isCheckingArgumentTypes || m_canExit);
154     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
155 }
156
157 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
158 {
159     if (!m_compileOkay)
160         return;
161     ASSERT(m_isCheckingArgumentTypes || m_canExit);
162     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
163     m_jit.appendExitInfo(jumpToFail);
164     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
165 }
166
167 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
168 {
169     ASSERT(m_isCheckingArgumentTypes || m_canExit);
170     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
171 }
172
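// An invalidation point is an OSR exit with no inline branch. It records a label
// (info.m_replacementSource) that can later be overwritten with a jump to the exit
// code if a watchpoint fires and this code block is invalidated.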
173 void SpeculativeJIT::emitInvalidationPoint(Node* node)
174 {
175     if (!m_compileOkay)
176         return;
177     ASSERT(m_canExit);
178     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
179     m_jit.jitCode()->appendOSRExit(OSRExit(
180         UncountableInvalidation, JSValueSource(),
181         m_jit.graph().methodOfGettingAValueProfileFor(node),
182         this, m_stream->size()));
183     info.m_replacementSource = m_jit.watchpointLabel();
184     ASSERT(info.m_replacementSource.isSet());
185     noResult(node);
186 }
187
188 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
189 {
190     ASSERT(m_isCheckingArgumentTypes || m_canExit);
191     if (!m_compileOkay)
192         return;
193     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
194     m_compileOkay = false;
195 }
196
197 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
198 {
199     ASSERT(m_isCheckingArgumentTypes || m_canExit);
200     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
201 }
202
203 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
204 {
205     ASSERT(needsTypeCheck(edge, typesPassedThrough));
206     m_interpreter.filter(edge, typesPassedThrough);
207     speculationCheck(BadType, source, edge.node(), jumpToFail);
208 }
209
210 RegisterSet SpeculativeJIT::usedRegisters()
211 {
212     RegisterSet result;
213     
214     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
215         GPRReg gpr = GPRInfo::toRegister(i);
216         if (m_gprs.isInUse(gpr))
217             result.set(gpr);
218     }
219     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
220         FPRReg fpr = FPRInfo::toRegister(i);
221         if (m_fprs.isInUse(fpr))
222             result.set(fpr);
223     }
224     
225     result.merge(RegisterSet::specialRegisters());
226     
227     return result;
228 }
229
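// Slow path generators are queued while the fast paths are being emitted and are then
// generated in one batch by runSlowPathGenerators(), so all slow cases end up out of
// line, after the main body of the compiled code.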
230 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
231 {
232     m_slowPathGenerators.append(slowPathGenerator);
233 }
234
235 void SpeculativeJIT::runSlowPathGenerators()
236 {
237     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
238         m_slowPathGenerators[i]->generate(this);
239 }
240
241 // On Windows we need to wrap fmod; on other platforms we can call it directly.
242 // On ARMv7 we assert that all function pointers have the low bit set (they point to Thumb code).
243 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
244 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
245 {
246     return fmod(x, y);
247 }
248 #else
249 #define fmodAsDFGOperation fmod
250 #endif
251
252 void SpeculativeJIT::clearGenerationInfo()
253 {
254     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
255         m_generationInfo[i] = GenerationInfo();
256     m_gprs = RegisterBank<GPRInfo>();
257     m_fprs = RegisterBank<FPRInfo>();
258 }
259
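// A "silent" spill/fill plan describes how to save a live register (typically around a
// call, see silentSpillAllRegisters() in the header) and restore it afterwards without
// touching the GenerationInfo, so register state looks unchanged to the rest of the
// code generator. These two functions only compute the plan; silentSpill() and
// silentFill() below execute it.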
260 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
261 {
262     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
263     Node* node = info.node();
264     DataFormat registerFormat = info.registerFormat();
265     ASSERT(registerFormat != DataFormatNone);
266     ASSERT(registerFormat != DataFormatDouble);
267         
268     SilentSpillAction spillAction;
269     SilentFillAction fillAction;
270         
271     if (!info.needsSpill())
272         spillAction = DoNothingForSpill;
273     else {
274 #if USE(JSVALUE64)
275         ASSERT(info.gpr() == source);
276         if (registerFormat == DataFormatInt32)
277             spillAction = Store32Payload;
278         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
279             spillAction = StorePtr;
280         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
281             spillAction = Store64;
282         else {
283             ASSERT(registerFormat & DataFormatJS);
284             spillAction = Store64;
285         }
286 #elif USE(JSVALUE32_64)
287         if (registerFormat & DataFormatJS) {
288             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
289             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
290         } else {
291             ASSERT(info.gpr() == source);
292             spillAction = Store32Payload;
293         }
294 #endif
295     }
296         
297     if (registerFormat == DataFormatInt32) {
298         ASSERT(info.gpr() == source);
299         ASSERT(isJSInt32(info.registerFormat()));
300         if (node->hasConstant()) {
301             ASSERT(isInt32Constant(node));
302             fillAction = SetInt32Constant;
303         } else
304             fillAction = Load32Payload;
305     } else if (registerFormat == DataFormatBoolean) {
306 #if USE(JSVALUE64)
307         RELEASE_ASSERT_NOT_REACHED();
308         fillAction = DoNothingForFill;
309 #elif USE(JSVALUE32_64)
310         ASSERT(info.gpr() == source);
311         if (node->hasConstant()) {
312             ASSERT(isBooleanConstant(node));
313             fillAction = SetBooleanConstant;
314         } else
315             fillAction = Load32Payload;
316 #endif
317     } else if (registerFormat == DataFormatCell) {
318         ASSERT(info.gpr() == source);
319         if (node->hasConstant()) {
320             JSValue value = valueOfJSConstant(node);
321             ASSERT_UNUSED(value, value.isCell());
322             fillAction = SetCellConstant;
323         } else {
324 #if USE(JSVALUE64)
325             fillAction = LoadPtr;
326 #else
327             fillAction = Load32Payload;
328 #endif
329         }
330     } else if (registerFormat == DataFormatStorage) {
331         ASSERT(info.gpr() == source);
332         fillAction = LoadPtr;
333     } else if (registerFormat == DataFormatInt52) {
334         if (node->hasConstant())
335             fillAction = SetInt52Constant;
336         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
337             fillAction = Load32PayloadConvertToInt52;
338         else if (info.spillFormat() == DataFormatInt52)
339             fillAction = Load64;
340         else if (info.spillFormat() == DataFormatStrictInt52)
341             fillAction = Load64ShiftInt52Left;
342         else if (info.spillFormat() == DataFormatNone)
343             fillAction = Load64;
344         else {
345             // Should never happen. Anything that qualifies as an int32 will never
346             // be turned into a cell (immediate spec fail) or a double (to-double
347             // conversions involve a separate node).
348             RELEASE_ASSERT_NOT_REACHED();
349             fillAction = Load64; // Make GCC happy.
350         }
351     } else if (registerFormat == DataFormatStrictInt52) {
352         if (node->hasConstant())
353             fillAction = SetStrictInt52Constant;
354         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
355             fillAction = Load32PayloadSignExtend;
356         else if (info.spillFormat() == DataFormatInt52)
357             fillAction = Load64ShiftInt52Right;
358         else if (info.spillFormat() == DataFormatStrictInt52)
359             fillAction = Load64;
360         else if (info.spillFormat() == DataFormatNone)
361             fillAction = Load64;
362         else {
363             // Should never happen. Anything that qualifies as an int32 will never
364             // be turned into a cell (immediate spec fail) or a double (to-double
365             // conversions involve a separate node).
366             RELEASE_ASSERT_NOT_REACHED();
367             fillAction = Load64; // Make GCC happy.
368         }
369     } else {
370         ASSERT(registerFormat & DataFormatJS);
371 #if USE(JSVALUE64)
372         ASSERT(info.gpr() == source);
373         if (node->hasConstant()) {
374             if (valueOfJSConstant(node).isCell())
375                 fillAction = SetTrustedJSConstant;
376             else
                    fillAction = SetJSConstant;
377         } else if (info.spillFormat() == DataFormatInt32) {
378             ASSERT(registerFormat == DataFormatJSInt32);
379             fillAction = Load32PayloadBoxInt;
380         } else if (info.spillFormat() == DataFormatDouble) {
381             ASSERT(registerFormat == DataFormatJSDouble);
382             fillAction = LoadDoubleBoxDouble;
383         } else
384             fillAction = Load64;
385 #else
386         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
387         if (node->hasConstant())
388             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
389         else if (info.payloadGPR() == source)
390             fillAction = Load32Payload;
391         else { // Fill the Tag
392             switch (info.spillFormat()) {
393             case DataFormatInt32:
394                 ASSERT(registerFormat == DataFormatJSInt32);
395                 fillAction = SetInt32Tag;
396                 break;
397             case DataFormatCell:
398                 ASSERT(registerFormat == DataFormatJSCell);
399                 fillAction = SetCellTag;
400                 break;
401             case DataFormatBoolean:
402                 ASSERT(registerFormat == DataFormatJSBoolean);
403                 fillAction = SetBooleanTag;
404                 break;
405             default:
406                 fillAction = Load32Tag;
407                 break;
408             }
409         }
410 #endif
411     }
412         
413     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
414 }
415     
416 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
417 {
418     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
419     Node* node = info.node();
420     ASSERT(info.registerFormat() == DataFormatDouble);
421
422     SilentSpillAction spillAction;
423     SilentFillAction fillAction;
424         
425     if (!info.needsSpill())
426         spillAction = DoNothingForSpill;
427     else {
428         ASSERT(!node->hasConstant());
429         ASSERT(info.spillFormat() == DataFormatNone);
430         ASSERT(info.fpr() == source);
431         spillAction = StoreDouble;
432     }
433         
434 #if USE(JSVALUE64)
435     if (node->hasConstant()) {
436         ASSERT(isNumberConstant(node));
437         fillAction = SetDoubleConstant;
438     } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
439         // It was spilled previously, and not as a double, so we need unboxing.
440         ASSERT(info.spillFormat() & DataFormatJS);
441         fillAction = LoadJSUnboxDouble;
442     } else
443         fillAction = LoadDouble;
444 #elif USE(JSVALUE32_64)
445     ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
446     if (node->hasConstant()) {
447         ASSERT(isNumberConstant(node));
448         fillAction = SetDoubleConstant;
449     } else
450         fillAction = LoadDouble;
451 #endif
452
453     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
454 }
455     
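// Execute a previously computed plan. silentFill() takes a scratch register
// (canTrample) for fills that must materialize a 64-bit constant or re-box a double
// on JSVALUE64; the 32-bit build has no such cases and ignores it.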
456 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
457 {
458     switch (plan.spillAction()) {
459     case DoNothingForSpill:
460         break;
461     case Store32Tag:
462         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
463         break;
464     case Store32Payload:
465         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
466         break;
467     case StorePtr:
468         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
469         break;
470 #if USE(JSVALUE64)
471     case Store64:
472         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
473         break;
474 #endif
475     case StoreDouble:
476         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
477         break;
478     default:
479         RELEASE_ASSERT_NOT_REACHED();
480     }
481 }
482     
483 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
484 {
485 #if USE(JSVALUE32_64)
486     UNUSED_PARAM(canTrample);
487 #endif
488     switch (plan.fillAction()) {
489     case DoNothingForFill:
490         break;
491     case SetInt32Constant:
492         m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
493         break;
494 #if USE(JSVALUE64)
495     case SetInt52Constant:
496         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
497         break;
498     case SetStrictInt52Constant:
499         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
500         break;
501 #endif // USE(JSVALUE64)
502     case SetBooleanConstant:
503         m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
504         break;
505     case SetCellConstant:
506         m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
507         break;
508 #if USE(JSVALUE64)
509     case SetTrustedJSConstant:
510         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
511         break;
512     case SetJSConstant:
513         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
514         break;
515     case SetDoubleConstant:
516         m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
517         m_jit.move64ToDouble(canTrample, plan.fpr());
518         break;
519     case Load32PayloadBoxInt:
520         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
521         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
522         break;
523     case Load32PayloadConvertToInt52:
524         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
525         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
526         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
527         break;
528     case Load32PayloadSignExtend:
529         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
530         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
531         break;
532     case LoadDoubleBoxDouble:
533         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
534         m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
535         break;
536     case LoadJSUnboxDouble:
537         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
538         unboxDouble(canTrample, plan.fpr());
539         break;
540 #else
541     case SetJSConstantTag:
542         m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
543         break;
544     case SetJSConstantPayload:
545         m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
546         break;
547     case SetInt32Tag:
548         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
549         break;
550     case SetCellTag:
551         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
552         break;
553     case SetBooleanTag:
554         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
555         break;
556     case SetDoubleConstant:
557         m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
558         break;
559 #endif
560     case Load32Tag:
561         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
562         break;
563     case Load32Payload:
564         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
565         break;
566     case LoadPtr:
567         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
568         break;
569 #if USE(JSVALUE64)
570     case Load64:
571         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
572         break;
573     case Load64ShiftInt52Right:
574         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
575         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
576         break;
577     case Load64ShiftInt52Left:
578         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
579         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
580         break;
581 #endif
582     case LoadDouble:
583         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
584         break;
585     default:
586         RELEASE_ASSERT_NOT_REACHED();
587     }
588 }
589     
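// On entry tempGPR holds the structure's indexing-type byte. These helpers return
// jumps that are taken when the indexing shape (and, depending on the array class,
// the IsArray bit) does not match what the ArrayMode expects; callers route those
// jumps to a speculation check or to an arrayify slow path.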
590 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
591 {
592     switch (arrayMode.arrayClass()) {
593     case Array::OriginalArray: {
594         CRASH();
595         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
596         return result;
597     }
598         
599     case Array::Array:
600         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
601         return m_jit.branch32(
602             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
603         
604     case Array::NonArray:
605     case Array::OriginalNonArray:
606         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
607         return m_jit.branch32(
608             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
609         
610     case Array::PossiblyArray:
611         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
612         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
613     }
614     
615     RELEASE_ASSERT_NOT_REACHED();
616     return JITCompiler::Jump();
617 }
618
619 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
620 {
621     JITCompiler::JumpList result;
622     
623     switch (arrayMode.type()) {
624     case Array::Int32:
625         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
626
627     case Array::Double:
628         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
629
630     case Array::Contiguous:
631         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
632
633     case Array::ArrayStorage:
634     case Array::SlowPutArrayStorage: {
635         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
636         
637         if (arrayMode.isJSArray()) {
638             if (arrayMode.isSlowPut()) {
639                 result.append(
640                     m_jit.branchTest32(
641                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
642                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
643                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
644                 result.append(
645                     m_jit.branch32(
646                         MacroAssembler::Above, tempGPR,
647                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
648                 break;
649             }
650             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
651             result.append(
652                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
653             break;
654         }
655         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
656         if (arrayMode.isSlowPut()) {
657             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
658             result.append(
659                 m_jit.branch32(
660                     MacroAssembler::Above, tempGPR,
661                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
662             break;
663         }
664         result.append(
665             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
666         break;
667     }
668     default:
669         CRASH();
670         break;
671     }
672     
673     return result;
674 }
675
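// CheckArray only verifies that the base already has the storage the ArrayMode wants:
// by indexing shape for JSArray-style storage, by ClassInfo for Arguments and typed
// arrays. Modes that require converting the storage are handled by arrayify() below.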
676 void SpeculativeJIT::checkArray(Node* node)
677 {
678     ASSERT(node->arrayMode().isSpecific());
679     ASSERT(!node->arrayMode().doesConversion());
680     
681     SpeculateCellOperand base(this, node->child1());
682     GPRReg baseReg = base.gpr();
683     
684     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
685         noResult(m_currentNode);
686         return;
687     }
688     
689     const ClassInfo* expectedClassInfo = 0;
690     
691     switch (node->arrayMode().type()) {
692     case Array::String:
693         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
694         break;
695     case Array::Int32:
696     case Array::Double:
697     case Array::Contiguous:
698     case Array::ArrayStorage:
699     case Array::SlowPutArrayStorage: {
700         GPRTemporary temp(this);
701         GPRReg tempGPR = temp.gpr();
702         m_jit.loadPtr(
703             MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
704         m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
705         speculationCheck(
706             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
707             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
708         
709         noResult(m_currentNode);
710         return;
711     }
712     case Array::Arguments:
713         expectedClassInfo = Arguments::info();
714         break;
715     default:
716         expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
717         break;
718     }
719     
720     RELEASE_ASSERT(expectedClassInfo);
721     
722     GPRTemporary temp(this);
723     m_jit.loadPtr(
724         MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
725     speculationCheck(
726         BadType, JSValueSource::unboxedCell(baseReg), node,
727         m_jit.branchPtr(
728             MacroAssembler::NotEqual,
729             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
730             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
731     
732     noResult(m_currentNode);
733 }
734
735 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
736 {
737     ASSERT(node->arrayMode().doesConversion());
738     
739     GPRTemporary temp(this);
740     GPRTemporary structure;
741     GPRReg tempGPR = temp.gpr();
742     GPRReg structureGPR = InvalidGPRReg;
743     
744     if (node->op() != ArrayifyToStructure) {
745         GPRTemporary realStructure(this);
746         structure.adopt(realStructure);
747         structureGPR = structure.gpr();
748     }
749         
750     // We can skip all that comes next if we already have array storage.
751     MacroAssembler::JumpList slowPath;
752     
753     if (node->op() == ArrayifyToStructure) {
754         slowPath.append(m_jit.branchWeakPtr(
755             JITCompiler::NotEqual,
756             JITCompiler::Address(baseReg, JSCell::structureOffset()),
757             node->structure()));
758     } else {
759         m_jit.loadPtr(
760             MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
761         
762         m_jit.load8(
763             MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
764         
765         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
766     }
767     
768     addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
769         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
770     
771     noResult(m_currentNode);
772 }
773
774 void SpeculativeJIT::arrayify(Node* node)
775 {
776     ASSERT(node->arrayMode().isSpecific());
777     
778     SpeculateCellOperand base(this, node->child1());
779     
780     if (!node->child2()) {
781         arrayify(node, base.gpr(), InvalidGPRReg);
782         return;
783     }
784     
785     SpeculateInt32Operand property(this, node->child2());
786     
787     arrayify(node, base.gpr(), property.gpr());
788 }
789
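// Fill the storage pointer (butterfly or typed-array vector) for this edge into a GPR.
// A value spilled as DataFormatStorage is reloaded directly; anything else is still in
// cell form and is filled as a cell.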
790 GPRReg SpeculativeJIT::fillStorage(Edge edge)
791 {
792     VirtualRegister virtualRegister = edge->virtualRegister();
793     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
794     
795     switch (info.registerFormat()) {
796     case DataFormatNone: {
797         if (info.spillFormat() == DataFormatStorage) {
798             GPRReg gpr = allocate();
799             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
800             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
801             info.fillStorage(*m_stream, gpr);
802             return gpr;
803         }
804         
805         // Must be a cell; fill it as a cell and then return the pointer.
806         return fillSpeculateCell(edge);
807     }
808         
809     case DataFormatStorage: {
810         GPRReg gpr = info.gpr();
811         m_gprs.lock(gpr);
812         return gpr;
813     }
814         
815     default:
816         return fillSpeculateCell(edge);
817     }
818 }
819
820 void SpeculativeJIT::useChildren(Node* node)
821 {
822     if (node->flags() & NodeHasVarArgs) {
823         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
824             if (!!m_jit.graph().m_varArgChildren[childIdx])
825                 use(m_jit.graph().m_varArgChildren[childIdx]);
826         }
827     } else {
828         Edge child1 = node->child1();
829         if (!child1) {
830             ASSERT(!node->child2() && !node->child3());
831             return;
832         }
833         use(child1);
834         
835         Edge child2 = node->child2();
836         if (!child2) {
837             ASSERT(!node->child3());
838             return;
839         }
840         use(child2);
841         
842         Edge child3 = node->child3();
843         if (!child3)
844             return;
845         use(child3);
846     }
847 }
848
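// For "in" with a constant identifier key we emit a patchable jump plus a
// StructureStubInfo, so the property check can be turned into an inline cache by
// operationInOptimize. All other cases call operationGenericIn directly.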
849 void SpeculativeJIT::compileIn(Node* node)
850 {
851     SpeculateCellOperand base(this, node->child2());
852     GPRReg baseGPR = base.gpr();
853         
854     if (isConstant(node->child1().node())) {
855         JSString* string =
856             jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
857         if (string && string->tryGetValueImpl()
858             && string->tryGetValueImpl()->isIdentifier()) {
859             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
860             
861             GPRTemporary result(this);
862             GPRReg resultGPR = result.gpr();
863
864             use(node->child1());
865             
866             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
867             MacroAssembler::Label done = m_jit.label();
868             
869             OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
870                 jump.m_jump, this, operationInOptimize,
871                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
872                 string->tryGetValueImpl());
873             
874             stubInfo->codeOrigin = node->origin.semantic;
875             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
876             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
877             stubInfo->patch.usedRegisters = usedRegisters();
878             stubInfo->patch.registersFlushed = false;
879             
880             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
881             addSlowPathGenerator(slowPath.release());
882                 
883             base.use();
884                 
885 #if USE(JSVALUE64)
886             jsValueResult(
887                 resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
888 #else
889             booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
890 #endif
891             return;
892         }
893     }
894         
895     JSValueOperand key(this, node->child1());
896     JSValueRegs regs = key.jsValueRegs();
897         
898     GPRResult result(this);
899     GPRReg resultGPR = result.gpr();
900         
901     base.use();
902     key.use();
903         
904     flushRegisters();
905     callOperation(
906         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
907         baseGPR, regs);
908 #if USE(JSVALUE64)
909     jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
910 #else
911     booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
912 #endif
913 }
914
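// The nonSpeculative compare helpers first try to fuse with an immediately following
// Branch (detectPeepHoleBranch()); if that succeeds they consume both nodes and return
// true, otherwise they materialize a boolean result and return false.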
915 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
916 {
917     unsigned branchIndexInBlock = detectPeepHoleBranch();
918     if (branchIndexInBlock != UINT_MAX) {
919         Node* branchNode = m_block->at(branchIndexInBlock);
920
921         ASSERT(node->adjustedRefCount() == 1);
922         
923         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
924     
925         m_indexInBlock = branchIndexInBlock;
926         m_currentNode = branchNode;
927         
928         return true;
929     }
930     
931     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
932     
933     return false;
934 }
935
936 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
937 {
938     unsigned branchIndexInBlock = detectPeepHoleBranch();
939     if (branchIndexInBlock != UINT_MAX) {
940         Node* branchNode = m_block->at(branchIndexInBlock);
941
942         ASSERT(node->adjustedRefCount() == 1);
943         
944         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
945     
946         m_indexInBlock = branchIndexInBlock;
947         m_currentNode = branchNode;
948         
949         return true;
950     }
951     
952     nonSpeculativeNonPeepholeStrictEq(node, invert);
953     
954     return false;
955 }
956
957 static const char* dataFormatString(DataFormat format)
958 {
959     // These values correspond to the DataFormat enum.
960     const char* strings[] = {
961         "[  ]",
962         "[ i]",
963         "[ d]",
964         "[ c]",
965         "Err!",
966         "Err!",
967         "Err!",
968         "Err!",
969         "[J ]",
970         "[Ji]",
971         "[Jd]",
972         "[Jc]",
973         "Err!",
974         "Err!",
975         "Err!",
976         "Err!",
977     };
978     return strings[format];
979 }
980
981 void SpeculativeJIT::dump(const char* label)
982 {
983     if (label)
984         dataLogF("<%s>\n", label);
985
986     dataLogF("  gprs:\n");
987     m_gprs.dump();
988     dataLogF("  fprs:\n");
989     m_fprs.dump();
990     dataLogF("  VirtualRegisters:\n");
991     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
992         GenerationInfo& info = m_generationInfo[i];
993         if (info.alive())
994             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
995         else
996             dataLogF("    % 3d:[__][__]", i);
997         if (info.registerFormat() == DataFormatDouble)
998             dataLogF(":fpr%d\n", info.fpr());
999         else if (info.registerFormat() != DataFormatNone
1000 #if USE(JSVALUE32_64)
1001             && !(info.registerFormat() & DataFormatJS)
1002 #endif
1003             ) {
1004             ASSERT(info.gpr() != InvalidGPRReg);
1005             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1006         } else
1007             dataLogF("\n");
1008     }
1009     if (label)
1010         dataLogF("</%s>\n", label);
1011 }
1012
1013 GPRTemporary::GPRTemporary()
1014     : m_jit(0)
1015     , m_gpr(InvalidGPRReg)
1016 {
1017 }
1018
1019 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1020     : m_jit(jit)
1021     , m_gpr(InvalidGPRReg)
1022 {
1023     m_gpr = m_jit->allocate();
1024 }
1025
1026 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1027     : m_jit(jit)
1028     , m_gpr(InvalidGPRReg)
1029 {
1030     m_gpr = m_jit->allocate(specific);
1031 }
1032
1033 #if USE(JSVALUE32_64)
1034 GPRTemporary::GPRTemporary(
1035     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1036     : m_jit(jit)
1037     , m_gpr(InvalidGPRReg)
1038 {
1039     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1040         m_gpr = m_jit->reuse(op1.gpr(which));
1041     else
1042         m_gpr = m_jit->allocate();
1043 }
1044 #endif // USE(JSVALUE32_64)
1045
1046 void GPRTemporary::adopt(GPRTemporary& other)
1047 {
1048     ASSERT(!m_jit);
1049     ASSERT(m_gpr == InvalidGPRReg);
1050     ASSERT(other.m_jit);
1051     ASSERT(other.m_gpr != InvalidGPRReg);
1052     m_jit = other.m_jit;
1053     m_gpr = other.m_gpr;
1054     other.m_jit = 0;
1055     other.m_gpr = InvalidGPRReg;
1056 }
1057
1058 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1059     : m_jit(jit)
1060     , m_fpr(InvalidFPRReg)
1061 {
1062     m_fpr = m_jit->fprAllocate();
1063 }
1064
1065 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1066     : m_jit(jit)
1067     , m_fpr(InvalidFPRReg)
1068 {
1069     if (m_jit->canReuse(op1.node()))
1070         m_fpr = m_jit->reuse(op1.fpr());
1071     else
1072         m_fpr = m_jit->fprAllocate();
1073 }
1074
1075 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1076     : m_jit(jit)
1077     , m_fpr(InvalidFPRReg)
1078 {
1079     if (m_jit->canReuse(op1.node()))
1080         m_fpr = m_jit->reuse(op1.fpr());
1081     else if (m_jit->canReuse(op2.node()))
1082         m_fpr = m_jit->reuse(op2.fpr());
1083     else
1084         m_fpr = m_jit->fprAllocate();
1085 }
1086
1087 #if USE(JSVALUE32_64)
1088 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1089     : m_jit(jit)
1090     , m_fpr(InvalidFPRReg)
1091 {
1092     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1093         m_fpr = m_jit->reuse(op1.fpr());
1094     else
1095         m_fpr = m_jit->fprAllocate();
1096 }
1097 #endif
1098
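// The compilePeepHole* helpers generate a fused compare-and-branch: they branch
// directly to the taken block and jump to notTaken. The integer, boolean and object
// variants invert the condition when the taken block is next, so the common case
// falls through.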
1099 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1100 {
1101     BasicBlock* taken = branchNode->takenBlock();
1102     BasicBlock* notTaken = branchNode->notTakenBlock();
1103     
1104     SpeculateDoubleOperand op1(this, node->child1());
1105     SpeculateDoubleOperand op2(this, node->child2());
1106     
1107     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1108     jump(notTaken);
1109 }
1110
1111 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1112 {
1113     BasicBlock* taken = branchNode->takenBlock();
1114     BasicBlock* notTaken = branchNode->notTakenBlock();
1115
1116     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1117     
1118     if (taken == nextBlock()) {
1119         condition = MacroAssembler::NotEqual;
1120         BasicBlock* tmp = taken;
1121         taken = notTaken;
1122         notTaken = tmp;
1123     }
1124
1125     SpeculateCellOperand op1(this, node->child1());
1126     SpeculateCellOperand op2(this, node->child2());
1127     
1128     GPRReg op1GPR = op1.gpr();
1129     GPRReg op2GPR = op2.gpr();
1130     
1131     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1132         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1133             speculationCheck(
1134                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
1135                 m_jit.branchPtr(
1136                     MacroAssembler::Equal, 
1137                     MacroAssembler::Address(op1GPR, JSCell::structureOffset()), 
1138                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1139         }
1140         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1141             speculationCheck(
1142                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1143                 m_jit.branchPtr(
1144                     MacroAssembler::Equal, 
1145                     MacroAssembler::Address(op2GPR, JSCell::structureOffset()), 
1146                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1147         }
1148     } else {
1149         GPRTemporary structure(this);
1150         GPRReg structureGPR = structure.gpr();
1151
1152         m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1153         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1154             speculationCheck(
1155                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1156                 m_jit.branchPtr(
1157                     MacroAssembler::Equal, 
1158                     structureGPR, 
1159                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1160         }
1161         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1162             m_jit.branchTest8(
1163                 MacroAssembler::NonZero, 
1164                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1165                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1166
1167         m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1168         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1169             speculationCheck(
1170                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1171                 m_jit.branchPtr(
1172                     MacroAssembler::Equal, 
1173                     structureGPR, 
1174                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1175         }
1176         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1177             m_jit.branchTest8(
1178                 MacroAssembler::NonZero, 
1179                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1180                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1181     }
1182
1183     branchPtr(condition, op1GPR, op2GPR, taken);
1184     jump(notTaken);
1185 }
1186
1187 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1188 {
1189     BasicBlock* taken = branchNode->takenBlock();
1190     BasicBlock* notTaken = branchNode->notTakenBlock();
1191
1192     // The branch instruction will branch to the taken block.
1193     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1194     if (taken == nextBlock()) {
1195         condition = JITCompiler::invert(condition);
1196         BasicBlock* tmp = taken;
1197         taken = notTaken;
1198         notTaken = tmp;
1199     }
1200
1201     if (isBooleanConstant(node->child1().node())) {
1202         bool imm = valueOfBooleanConstant(node->child1().node());
1203         SpeculateBooleanOperand op2(this, node->child2());
1204         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1205     } else if (isBooleanConstant(node->child2().node())) {
1206         SpeculateBooleanOperand op1(this, node->child1());
1207         bool imm = valueOfBooleanConstant(node->child2().node());
1208         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1209     } else {
1210         SpeculateBooleanOperand op1(this, node->child1());
1211         SpeculateBooleanOperand op2(this, node->child2());
1212         branch32(condition, op1.gpr(), op2.gpr(), taken);
1213     }
1214
1215     jump(notTaken);
1216 }
1217
1218 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1219 {
1220     BasicBlock* taken = branchNode->takenBlock();
1221     BasicBlock* notTaken = branchNode->notTakenBlock();
1222
1223     // The branch instruction will branch to the taken block.
1224     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1225     if (taken == nextBlock()) {
1226         condition = JITCompiler::invert(condition);
1227         BasicBlock* tmp = taken;
1228         taken = notTaken;
1229         notTaken = tmp;
1230     }
1231
1232     if (isInt32Constant(node->child1().node())) {
1233         int32_t imm = valueOfInt32Constant(node->child1().node());
1234         SpeculateInt32Operand op2(this, node->child2());
1235         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1236     } else if (isInt32Constant(node->child2().node())) {
1237         SpeculateInt32Operand op1(this, node->child1());
1238         int32_t imm = valueOfInt32Constant(node->child2().node());
1239         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1240     } else {
1241         SpeculateInt32Operand op1(this, node->child1());
1242         SpeculateInt32Operand op2(this, node->child2());
1243         branch32(condition, op1.gpr(), op2.gpr(), taken);
1244     }
1245
1246     jump(notTaken);
1247 }
1248
1249 // Returns true if the compare is fused with a subsequent branch.
1250 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1251 {
1252     // Fused compare & branch.
1253     unsigned branchIndexInBlock = detectPeepHoleBranch();
1254     if (branchIndexInBlock != UINT_MAX) {
1255         Node* branchNode = m_block->at(branchIndexInBlock);
1256
1257         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1258         // so there can be no intervening nodes that also reference the compare.
1259         ASSERT(node->adjustedRefCount() == 1);
1260
1261         if (node->isBinaryUseKind(Int32Use))
1262             compilePeepHoleInt32Branch(node, branchNode, condition);
1263 #if USE(JSVALUE64)
1264         else if (node->isBinaryUseKind(MachineIntUse))
1265             compilePeepHoleInt52Branch(node, branchNode, condition);
1266 #endif // USE(JSVALUE64)
1267         else if (node->isBinaryUseKind(NumberUse))
1268             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1269         else if (node->op() == CompareEq) {
1270             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1271                 // Use non-peephole comparison, for now.
1272                 return false;
1273             }
1274             if (node->isBinaryUseKind(BooleanUse))
1275                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1276             else if (node->isBinaryUseKind(ObjectUse))
1277                 compilePeepHoleObjectEquality(node, branchNode);
1278             else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
1279                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1280             else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
1281                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1282             else {
1283                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1284                 return true;
1285             }
1286         } else {
1287             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1288             return true;
1289         }
1290
1291         use(node->child1());
1292         use(node->child2());
1293         m_indexInBlock = branchIndexInBlock;
1294         m_currentNode = branchNode;
1295         return true;
1296     }
1297     return false;
1298 }
1299
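// noticeOSRBirth() and compileMovHint() feed the VariableEventStream. OSR exit
// reconstructs bytecode variable state from that stream, which is why hints are
// recorded even for nodes that generate no machine code.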
1300 void SpeculativeJIT::noticeOSRBirth(Node* node)
1301 {
1302     if (!node->hasVirtualRegister())
1303         return;
1304     
1305     VirtualRegister virtualRegister = node->virtualRegister();
1306     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1307     
1308     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1309 }
1310
1311 void SpeculativeJIT::compileMovHint(Node* node)
1312 {
1313     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1314     
1315     Node* child = node->child1().node();
1316     noticeOSRBirth(child);
1317     
1318     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1319 }
1320
1321 void SpeculativeJIT::bail()
1322 {
1323     m_compileOkay = true;
1324     m_jit.breakpoint();
1325     clearGenerationInfo();
1326 }
1327
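// Per-block code generation: record the block's entry label, plant a breakpoint for
// blocks the CFA proved unreachable, replay variablesAtHead into the variable event
// stream, then generate each node in order, bailing out if the abstract state ever
// becomes contradictory.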
1328 void SpeculativeJIT::compileCurrentBlock()
1329 {
1330     ASSERT(m_compileOkay);
1331     
1332     if (!m_block)
1333         return;
1334     
1335     ASSERT(m_block->isReachable);
1336     
1337     m_jit.blockHeads()[m_block->index] = m_jit.label();
1338
1339     if (!m_block->cfaHasVisited) {
1340         // Don't generate code for basic blocks that are unreachable according to CFA.
1341         // But to be sure that nobody has generated a jump to this block, drop in a
1342         // breakpoint here.
1343         m_jit.breakpoint();
1344         return;
1345     }
1346
1347     m_stream->appendAndLog(VariableEvent::reset());
1348     
1349     m_jit.jitAssertHasValidCallFrame();
1350     m_jit.jitAssertTagsInPlace();
1351     m_jit.jitAssertArgumentCountSane();
1352
1353     for (size_t i = 0; i < m_block->variablesAtHead.numberOfArguments(); ++i) {
1354         m_stream->appendAndLog(
1355             VariableEvent::setLocal(
1356                 virtualRegisterForArgument(i), virtualRegisterForArgument(i), DataFormatJS));
1357     }
1358     
1359     m_state.reset();
1360     m_state.beginBasicBlock(m_block);
1361     
1362     for (size_t i = 0; i < m_block->variablesAtHead.numberOfLocals(); ++i) {
1363         Node* node = m_block->variablesAtHead.local(i);
1364         if (!node)
1365             continue; // No need to record dead SetLocal's.
1366         
1367         VariableAccessData* variable = node->variableAccessData();
1368         DataFormat format;
1369         if (!node->refCount())
1370             continue; // No need to record dead SetLocal's.
1371         else
1372             format = dataFormatFor(variable->flushFormat());
1373         m_stream->appendAndLog(
1374             VariableEvent::setLocal(virtualRegisterForLocal(i), variable->machineLocal(), format));
1375     }
1376     
1377     m_codeOriginForExitTarget = CodeOrigin();
1378     m_codeOriginForExitProfile = CodeOrigin();
1379     
1380     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1381         m_currentNode = m_block->at(m_indexInBlock);
1382         
1383         // We may have hit a contradiction that the CFA was aware of but that the JIT
1384         // didn't cause directly.
1385         if (!m_state.isValid()) {
1386             bail();
1387             return;
1388         }
1389         
1390         m_canExit = m_currentNode->canExit();
1391         bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
1392         m_jit.setForNode(m_currentNode);
1393         m_codeOriginForExitTarget = m_currentNode->origin.forExit;
1394         m_codeOriginForExitProfile = m_currentNode->origin.semantic;
1395         if (!m_currentNode->shouldGenerate()) {
1396             switch (m_currentNode->op()) {
1397             case JSConstant:
1398                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1399                 break;
1400                 
1401             case WeakJSConstant:
1402                 m_jit.addWeakReference(m_currentNode->weakConstant());
1403                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1404                 break;
1405                 
1406             case SetLocal:
1407                 RELEASE_ASSERT_NOT_REACHED();
1408                 break;
1409                 
1410             case MovHint:
1411                 compileMovHint(m_currentNode);
1412                 break;
1413                 
1414             case ZombieHint: {
1415                 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1416                 break;
1417             }
1418
1419             default:
1420                 if (belongsInMinifiedGraph(m_currentNode->op()))
1421                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1422                 break;
1423             }
1424         } else {
1425             
1426             if (verboseCompilationEnabled()) {
1427                 dataLogF(
1428                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1429                     (int)m_currentNode->index(),
1430                     m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1431                 dataLog("\n");
1432             }
1433             
1434             compile(m_currentNode);
1435
1436 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1437             m_jit.clearRegisterAllocationOffsets();
1438 #endif
1439
1440             if (!m_compileOkay) {
1441                 bail();
1442                 return;
1443             }
1444             
1445             if (belongsInMinifiedGraph(m_currentNode->op())) {
1446                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1447                 noticeOSRBirth(m_currentNode);
1448             }
1449         }
1450         
1451         // Make sure that the abstract state is rematerialized for the next node.
1452         if (shouldExecuteEffects)
1453             m_interpreter.executeEffects(m_indexInBlock);
1454     }
1455     
1456     // Perform the most basic verification that children have been used correctly.
1457     if (!ASSERT_DISABLED) {
1458         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1459             GenerationInfo& info = m_generationInfo[index];
1460             RELEASE_ASSERT(!info.alive());
1461         }
1462     }
1463 }
1464
1465 // If we are making type predictions about our arguments then
1466 // we need to check that they are correct on function entry.
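// For example, on 64-bit an argument flushed as FlushedInt32 is checked by loading
// the boxed value and failing the speculation if it is below tagTypeNumberRegister,
// i.e. if it does not carry the int32 tag; a failure here is a BadType OSR exit
// (rough summary of the per-format checks emitted below).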
1467 void SpeculativeJIT::checkArgumentTypes()
1468 {
1469     ASSERT(!m_currentNode);
1470     m_isCheckingArgumentTypes = true;
1471     m_codeOriginForExitTarget = CodeOrigin(0);
1472     m_codeOriginForExitProfile = CodeOrigin(0);
1473
1474     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1475         Node* node = m_jit.graph().m_arguments[i];
1476         if (!node) {
1477             // The argument is dead. We don't do any checks for such arguments.
1478             continue;
1479         }
1480         
1481         ASSERT(node->op() == SetArgument);
1482         ASSERT(node->shouldGenerate());
1483
1484         VariableAccessData* variableAccessData = node->variableAccessData();
1485         FlushFormat format = variableAccessData->flushFormat();
1486         
1487         if (format == FlushedJSValue)
1488             continue;
1489         
1490         VirtualRegister virtualRegister = variableAccessData->local();
1491
1492         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1493         
1494 #if USE(JSVALUE64)
1495         switch (format) {
1496         case FlushedInt32: {
1497             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1498             break;
1499         }
1500         case FlushedBoolean: {
1501             GPRTemporary temp(this);
1502             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1503             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1504             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1505             break;
1506         }
1507         case FlushedCell: {
1508             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1509             break;
1510         }
1511         default:
1512             RELEASE_ASSERT_NOT_REACHED();
1513             break;
1514         }
1515 #else
1516         switch (format) {
1517         case FlushedInt32: {
1518             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1519             break;
1520         }
1521         case FlushedBoolean: {
1522             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1523             break;
1524         }
1525         case FlushedCell: {
1526             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1527             break;
1528         }
1529         default:
1530             RELEASE_ASSERT_NOT_REACHED();
1531             break;
1532         }
1533 #endif
1534     }
1535     m_isCheckingArgumentTypes = false;
1536 }
1537
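// prepareJITCodeForTierUp walks every block to count Call and Construct nodes and
// then sizes and zero-fills jitCode()->slowPathCalls with one counter per such node,
// presumably so the counters exist before any code that bumps them is generated.
// compile() only invokes this when the plan willTryToTierUp.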
1538 void SpeculativeJIT::prepareJITCodeForTierUp()
1539 {
1540     unsigned numberOfCalls = 0;
1541     
1542     for (BlockIndex blockIndex = m_jit.graph().numBlocks(); blockIndex--;) {
1543         BasicBlock* block = m_jit.graph().block(blockIndex);
1544         if (!block)
1545             continue;
1546         
1547         for (unsigned nodeIndex = block->size(); nodeIndex--;) {
1548             Node* node = block->at(nodeIndex);
1549             
1550             switch (node->op()) {
1551             case Call:
1552             case Construct:
1553                 numberOfCalls++;
1554                 break;
1555                 
1556             default:
1557                 break;
1558             }
1559         }
1560     }
1561     
1562     m_jit.jitCode()->slowPathCalls.fill(0, numberOfCalls);
1563 }
1564
1565 bool SpeculativeJIT::compile()
1566 {
1567     checkArgumentTypes();
1568     
1569     if (m_jit.graph().m_plan.willTryToTierUp)
1570         prepareJITCodeForTierUp();
1571     
1572     ASSERT(!m_currentNode);
1573     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1574         m_jit.setForBlockIndex(blockIndex);
1575         m_block = m_jit.graph().block(blockIndex);
1576         compileCurrentBlock();
1577     }
1578     linkBranches();
1579     return true;
1580 }
1581
1582 void SpeculativeJIT::createOSREntries()
1583 {
1584     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1585         BasicBlock* block = m_jit.graph().block(blockIndex);
1586         if (!block)
1587             continue;
1588         if (!block->isOSRTarget)
1589             continue;
1590         
1591         // Currently we don't have OSR entry trampolines. We could add them
1592         // here if need be.
1593         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1594     }
1595 }
1596
1597 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1598 {
1599     unsigned osrEntryIndex = 0;
1600     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1601         BasicBlock* block = m_jit.graph().block(blockIndex);
1602         if (!block)
1603             continue;
1604         if (!block->isOSRTarget)
1605             continue;
1606         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1607     }
1608     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1609 }
1610
1611 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1612 {
1613     Edge child3 = m_jit.graph().varArgChild(node, 2);
1614     Edge child4 = m_jit.graph().varArgChild(node, 3);
1615
1616     ArrayMode arrayMode = node->arrayMode();
1617     
1618     GPRReg baseReg = base.gpr();
1619     GPRReg propertyReg = property.gpr();
1620     
1621     SpeculateDoubleOperand value(this, child3);
1622
1623     FPRReg valueReg = value.fpr();
1624     
1625     DFG_TYPE_CHECK(
1626         JSValueRegs(), child3, SpecFullRealNumber,
1627         m_jit.branchDouble(
1628             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1629     
1630     if (!m_compileOkay)
1631         return;
1632     
1633     StorageOperand storage(this, child4);
1634     GPRReg storageReg = storage.gpr();
1635
1636     if (node->op() == PutByValAlias) {
1637         // Store the value to the array.
1638         GPRReg propertyReg = property.gpr();
1639         FPRReg valueReg = value.fpr();
1640         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1641         
1642         noResult(m_currentNode);
1643         return;
1644     }
1645     
1646     GPRTemporary temporary;
1647     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1648
1649     MacroAssembler::Jump slowCase;
1650     
1651     if (arrayMode.isInBounds()) {
1652         speculationCheck(
1653             OutOfBounds, JSValueRegs(), 0,
1654             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1655     } else {
1656         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1657         
1658         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1659         
1660         if (!arrayMode.isOutOfBounds())
1661             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1662         
1663         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1664         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1665         
1666         inBounds.link(&m_jit);
1667     }
1668     
1669     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1670
1671     base.use();
1672     property.use();
1673     value.use();
1674     storage.use();
1675     
1676     if (arrayMode.isOutOfBounds()) {
1677         addSlowPathGenerator(
1678             slowPathCall(
1679                 slowCase, this,
1680                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1681                 NoResult, baseReg, propertyReg, valueReg));
1682     }
1683
1684     noResult(m_currentNode, UseChildrenCalledExplicitly);
1685 }
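// In the non-alias path above, an index at or past publicLength but still below
// vectorLength is handled as an in-place append: publicLength is bumped to index + 1
// before the double is stored. Only indices at or beyond vectorLength reach the slow
// path call, and only when the array mode permits out-of-bounds stores; otherwise
// that case is an OutOfBounds speculation failure.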
1686
1687 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1688 {
1689     SpeculateCellOperand string(this, node->child1());
1690     SpeculateStrictInt32Operand index(this, node->child2());
1691     StorageOperand storage(this, node->child3());
1692
1693     GPRReg stringReg = string.gpr();
1694     GPRReg indexReg = index.gpr();
1695     GPRReg storageReg = storage.gpr();
1696     
1697     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1698
1699     // unsigned comparison so we can filter out negative indices and indices that are too large
1700     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1701
1702     GPRTemporary scratch(this);
1703     GPRReg scratchReg = scratch.gpr();
1704
1705     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1706
1707     // Load the character into scratchReg
1708     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1709
1710     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1711     JITCompiler::Jump cont8Bit = m_jit.jump();
1712
1713     is16Bit.link(&m_jit);
1714
1715     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1716
1717     cont8Bit.link(&m_jit);
1718
1719     int32Result(scratchReg, m_currentNode);
1720 }
1721
1722 void SpeculativeJIT::compileGetByValOnString(Node* node)
1723 {
1724     SpeculateCellOperand base(this, node->child1());
1725     SpeculateStrictInt32Operand property(this, node->child2());
1726     StorageOperand storage(this, node->child3());
1727     GPRReg baseReg = base.gpr();
1728     GPRReg propertyReg = property.gpr();
1729     GPRReg storageReg = storage.gpr();
1730
1731     GPRTemporary scratch(this);
1732     GPRReg scratchReg = scratch.gpr();
1733 #if USE(JSVALUE32_64)
1734     GPRTemporary resultTag;
1735     GPRReg resultTagReg = InvalidGPRReg;
1736     if (node->arrayMode().isOutOfBounds()) {
1737         GPRTemporary realResultTag(this);
1738         resultTag.adopt(realResultTag);
1739         resultTagReg = resultTag.gpr();
1740     }
1741 #endif
1742
1743     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1744
1745     // unsigned comparison so we can filter out negative indices and indices that are too large
1746     JITCompiler::Jump outOfBounds = m_jit.branch32(
1747         MacroAssembler::AboveOrEqual, propertyReg,
1748         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1749     if (node->arrayMode().isInBounds())
1750         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1751
1752     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1753
1754     // Load the character into scratchReg
1755     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1756
1757     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1758     JITCompiler::Jump cont8Bit = m_jit.jump();
1759
1760     is16Bit.link(&m_jit);
1761
1762     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1763
1764     JITCompiler::Jump bigCharacter =
1765         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1766
1767     // 8 bit string values don't need the isASCII check.
1768     cont8Bit.link(&m_jit);
1769
1770     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1771     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1772     m_jit.loadPtr(scratchReg, scratchReg);
1773
1774     addSlowPathGenerator(
1775         slowPathCall(
1776             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1777
1778     if (node->arrayMode().isOutOfBounds()) {
1779 #if USE(JSVALUE32_64)
1780         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1781 #endif
1782
1783         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1784         if (globalObject->stringPrototypeChainIsSane()) {
1785 #if USE(JSVALUE64)
1786             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1787                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
1788 #else
1789             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1790                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1791                 baseReg, propertyReg)));
1792 #endif
1793         } else {
1794 #if USE(JSVALUE64)
1795             addSlowPathGenerator(
1796                 slowPathCall(
1797                     outOfBounds, this, operationGetByValStringInt,
1798                     scratchReg, baseReg, propertyReg));
1799 #else
1800             addSlowPathGenerator(
1801                 slowPathCall(
1802                     outOfBounds, this, operationGetByValStringInt,
1803                     resultTagReg, scratchReg, baseReg, propertyReg));
1804 #endif
1805         }
1806         
1807 #if USE(JSVALUE64)
1808         jsValueResult(scratchReg, m_currentNode);
1809 #else
1810         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1811 #endif
1812     } else
1813         cellResult(scratchReg, m_currentNode);
1814 }
1815
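// compileFromCharCode's fast path only covers char codes below 0xff whose
// single-character string already exists in the VM's small-strings cache; codes of
// 0xff and above, or a null cache entry, fall back to operationStringFromCharCode.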
1816 void SpeculativeJIT::compileFromCharCode(Node* node)
1817 {
1818     SpeculateStrictInt32Operand property(this, node->child1());
1819     GPRReg propertyReg = property.gpr();
1820     GPRTemporary smallStrings(this);
1821     GPRTemporary scratch(this);
1822     GPRReg scratchReg = scratch.gpr();
1823     GPRReg smallStringsReg = smallStrings.gpr();
1824
1825     JITCompiler::JumpList slowCases;
1826     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1827     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1828     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1829
1830     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1831     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1832     cellResult(scratchReg, m_currentNode);
1833 }
1834
1835 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1836 {
1837     VirtualRegister virtualRegister = node->virtualRegister();
1838     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1839
1840     switch (info.registerFormat()) {
1841     case DataFormatStorage:
1842         RELEASE_ASSERT_NOT_REACHED();
1843
1844     case DataFormatBoolean:
1845     case DataFormatCell:
1846         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1847         return GeneratedOperandTypeUnknown;
1848
1849     case DataFormatNone:
1850     case DataFormatJSCell:
1851     case DataFormatJS:
1852     case DataFormatJSBoolean:
1853         return GeneratedOperandJSValue;
1854
1855     case DataFormatJSInt32:
1856     case DataFormatInt32:
1857         return GeneratedOperandInteger;
1858
1859     case DataFormatJSDouble:
1860     case DataFormatDouble:
1861         return GeneratedOperandDouble;
1862         
1863     default:
1864         RELEASE_ASSERT_NOT_REACHED();
1865         return GeneratedOperandTypeUnknown;
1866     }
1867 }
1868
1869 void SpeculativeJIT::compileValueToInt32(Node* node)
1870 {
1871     switch (node->child1().useKind()) {
1872     case Int32Use: {
1873         SpeculateInt32Operand op1(this, node->child1());
1874         GPRTemporary result(this, Reuse, op1);
1875         m_jit.move(op1.gpr(), result.gpr());
1876         int32Result(result.gpr(), node, op1.format());
1877         return;
1878     }
1879         
1880 #if USE(JSVALUE64)
1881     case MachineIntUse: {
1882         SpeculateStrictInt52Operand op1(this, node->child1());
1883         GPRTemporary result(this, Reuse, op1);
1884         GPRReg op1GPR = op1.gpr();
1885         GPRReg resultGPR = result.gpr();
1886         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1887         int32Result(resultGPR, node, DataFormatInt32);
1888         return;
1889     }
1890 #endif // USE(JSVALUE64)
1891     
1892     case NumberUse:
1893     case NotCellUse: {
1894         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1895         case GeneratedOperandInteger: {
1896             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1897             GPRTemporary result(this, Reuse, op1);
1898             m_jit.move(op1.gpr(), result.gpr());
1899             int32Result(result.gpr(), node, op1.format());
1900             return;
1901         }
1902         case GeneratedOperandDouble: {
1903             GPRTemporary result(this);
1904             SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
1905             FPRReg fpr = op1.fpr();
1906             GPRReg gpr = result.gpr();
1907             JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1908             
1909             addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
1910
1911             int32Result(gpr, node);
1912             return;
1913         }
1914         case GeneratedOperandJSValue: {
1915             GPRTemporary result(this);
1916 #if USE(JSVALUE64)
1917             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1918
1919             GPRReg gpr = op1.gpr();
1920             GPRReg resultGpr = result.gpr();
1921             FPRTemporary tempFpr(this);
1922             FPRReg fpr = tempFpr.fpr();
1923
1924             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
1925             JITCompiler::JumpList converted;
1926
1927             if (node->child1().useKind() == NumberUse) {
1928                 DFG_TYPE_CHECK(
1929                     JSValueRegs(gpr), node->child1(), SpecFullNumber,
1930                     m_jit.branchTest64(
1931                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
1932             } else {
1933                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
1934                 
1935                 DFG_TYPE_CHECK(
1936                     JSValueRegs(gpr), node->child1(), ~SpecCell,
1937                     m_jit.branchTest64(
1938                         JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
1939                 
1940                 // It's not a cell: so true turns into 1 and all else turns into 0.
1941                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
1942                 converted.append(m_jit.jump());
1943                 
1944                 isNumber.link(&m_jit);
1945             }
1946
1947             // First, if we get here we have a double encoded as a JSValue
1948             m_jit.move(gpr, resultGpr);
1949             unboxDouble(resultGpr, fpr);
1950
1951             silentSpillAllRegisters(resultGpr);
1952             callOperation(toInt32, resultGpr, fpr);
1953             silentFillAllRegisters(resultGpr);
1954
1955             converted.append(m_jit.jump());
1956
1957             isInteger.link(&m_jit);
1958             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
1959
1960             converted.link(&m_jit);
1961 #else
1962             Node* childNode = node->child1().node();
1963             VirtualRegister virtualRegister = childNode->virtualRegister();
1964             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1965
1966             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
1967
1968             GPRReg payloadGPR = op1.payloadGPR();
1969             GPRReg resultGpr = result.gpr();
1970         
1971             JITCompiler::JumpList converted;
1972
1973             if (info.registerFormat() == DataFormatJSInt32)
1974                 m_jit.move(payloadGPR, resultGpr);
1975             else {
1976                 GPRReg tagGPR = op1.tagGPR();
1977                 FPRTemporary tempFpr(this);
1978                 FPRReg fpr = tempFpr.fpr();
1979                 FPRTemporary scratch(this);
1980
1981                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
1982
1983                 if (node->child1().useKind() == NumberUse) {
1984                     DFG_TYPE_CHECK(
1985                         JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber,
1986                         m_jit.branch32(
1987                             MacroAssembler::AboveOrEqual, tagGPR,
1988                             TrustedImm32(JSValue::LowestTag)));
1989                 } else {
1990                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
1991                     
1992                     DFG_TYPE_CHECK(
1993                         JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
1994                         m_jit.branch32(
1995                             JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
1996                     
1997                     // It's not a cell: so true turns into 1 and all else turns into 0.
1998                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
1999                     m_jit.move(TrustedImm32(0), resultGpr);
2000                     converted.append(m_jit.jump());
2001                     
2002                     isBoolean.link(&m_jit);
2003                     m_jit.move(payloadGPR, resultGpr);
2004                     converted.append(m_jit.jump());
2005                     
2006                     isNumber.link(&m_jit);
2007                 }
2008
2009                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2010
2011                 silentSpillAllRegisters(resultGpr);
2012                 callOperation(toInt32, resultGpr, fpr);
2013                 silentFillAllRegisters(resultGpr);
2014
2015                 converted.append(m_jit.jump());
2016
2017                 isInteger.link(&m_jit);
2018                 m_jit.move(payloadGPR, resultGpr);
2019
2020                 converted.link(&m_jit);
2021             }
2022 #endif
2023             int32Result(resultGpr, node);
2024             return;
2025         }
2026         case GeneratedOperandTypeUnknown:
2027             RELEASE_ASSERT(!m_compileOkay);
2028             return;
2029         }
2030         RELEASE_ASSERT_NOT_REACHED();
2031         return;
2032     }
2033     
2034     case BooleanUse: {
2035         SpeculateBooleanOperand op1(this, node->child1());
2036         GPRTemporary result(this, Reuse, op1);
2037         
2038         m_jit.move(op1.gpr(), result.gpr());
2039         m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2040         
2041         int32Result(result.gpr(), node);
2042         return;
2043     }
2044
2045     default:
2046         ASSERT(!m_compileOkay);
2047         return;
2048     }
2049 }
2050
2051 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2052 {
2053     if (doesOverflow(node->arithMode())) {
2054         // We know that this sometimes produces doubles. So produce a double every
2055         // time. This at least allows subsequent code to not have weird conditionals.
2056             
2057         SpeculateInt32Operand op1(this, node->child1());
2058         FPRTemporary result(this);
2059             
2060         GPRReg inputGPR = op1.gpr();
2061         FPRReg outputFPR = result.fpr();
2062             
2063         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2064             
2065         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2066         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2067         positive.link(&m_jit);
2068             
2069         doubleResult(outputFPR, node);
2070         return;
2071     }
2072     
2073     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2074
2075     SpeculateInt32Operand op1(this, node->child1());
2076     GPRTemporary result(this);
2077
2078     m_jit.move(op1.gpr(), result.gpr());
2079
2080     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2081
2082     int32Result(result.gpr(), node, op1.format());
2083 }
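// The overflow (double) path above recovers the unsigned value from a signed int32
// by adding 2^32 whenever the input is negative. For example, the bit pattern
// 0xffffffff converts to -1.0 and becomes -1.0 + 4294967296.0 = 4294967295.0, the
// intended uint32 value (informal arithmetic check, not an exhaustive argument).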
2084
2085 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2086 {
2087     SpeculateDoubleOperand op1(this, node->child1());
2088     FPRTemporary scratch(this);
2089     GPRTemporary result(this);
2090     
2091     FPRReg valueFPR = op1.fpr();
2092     FPRReg scratchFPR = scratch.fpr();
2093     GPRReg resultGPR = result.gpr();
2094
2095     JITCompiler::JumpList failureCases;
2096     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2097     m_jit.branchConvertDoubleToInt32(
2098         valueFPR, resultGPR, failureCases, scratchFPR,
2099         shouldCheckNegativeZero(node->arithMode()));
2100     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2101
2102     int32Result(resultGPR, node);
2103 }
2104
2105 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2106 {
2107     ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2108     
2109     if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2110         SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2111         FPRTemporary result(this);
2112         m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2113         doubleResult(result.fpr(), node);
2114         return;
2115     }
2116     
2117     JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2118     FPRTemporary result(this);
2119     
2120 #if USE(JSVALUE64)
2121     GPRTemporary temp(this);
2122
2123     GPRReg op1GPR = op1.gpr();
2124     GPRReg tempGPR = temp.gpr();
2125     FPRReg resultFPR = result.fpr();
2126     
2127     JITCompiler::Jump isInteger = m_jit.branch64(
2128         MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2129     
2130     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2131         typeCheck(
2132             JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2133             m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2134     }
2135     
2136     m_jit.move(op1GPR, tempGPR);
2137     unboxDouble(tempGPR, resultFPR);
2138     JITCompiler::Jump done = m_jit.jump();
2139     
2140     isInteger.link(&m_jit);
2141     m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2142     done.link(&m_jit);
2143 #else
2144     FPRTemporary temp(this);
2145     
2146     GPRReg op1TagGPR = op1.tagGPR();
2147     GPRReg op1PayloadGPR = op1.payloadGPR();
2148     FPRReg tempFPR = temp.fpr();
2149     FPRReg resultFPR = result.fpr();
2150     
2151     JITCompiler::Jump isInteger = m_jit.branch32(
2152         MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2153     
2154     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2155         typeCheck(
2156             JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2157             m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2158     }
2159     
2160     unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2161     JITCompiler::Jump done = m_jit.jump();
2162     
2163     isInteger.link(&m_jit);
2164     m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2165     done.link(&m_jit);
2166 #endif
2167     
2168     doubleResult(resultFPR, node);
2169 }
2170
2171 static double clampDoubleToByte(double d)
2172 {
2173     d += 0.5;
2174     if (!(d > 0))
2175         d = 0;
2176     else if (d > 255)
2177         d = 255;
2178     return d;
2179 }
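// Informal spot checks of the helper above (not authoritative tests):
//   clampDoubleToByte(-1.0)  -> -0.5 fails (d > 0), so 0 is returned
//   clampDoubleToByte(300.0) -> 300.5 > 255, so 255 is returned
//   clampDoubleToByte(NaN)   -> NaN + 0.5 is NaN, !(NaN > 0) holds, so 0
// The result is still a double; callers truncate it to an integer afterwards.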
2180
2181 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2182 {
2183     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2184     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2185     jit.xorPtr(result, result);
2186     MacroAssembler::Jump clamped = jit.jump();
2187     tooBig.link(&jit);
2188     jit.move(JITCompiler::TrustedImm32(255), result);
2189     clamped.link(&jit);
2190     inBounds.link(&jit);
2191 }
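// The integer clamp above leans on the two comparisons against 0xff: the unsigned
// BelowOrEqual test accepts exactly 0..255 unchanged, and the signed GreaterThan
// test then separates overly large values (clamped to 255) from negative ones
// (zeroed via the xor).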
2192
2193 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2194 {
2195     // Unordered compare so we pick up NaN
2196     static const double zero = 0;
2197     static const double byteMax = 255;
2198     static const double half = 0.5;
2199     jit.loadDouble(&zero, scratch);
2200     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2201     jit.loadDouble(&byteMax, scratch);
2202     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2203     
2204     jit.loadDouble(&half, scratch);
2205     // FIXME: This should probably just use a floating point round!
2206     // https://bugs.webkit.org/show_bug.cgi?id=72054
2207     jit.addDouble(source, scratch);
2208     jit.truncateDoubleToInt32(scratch, result);   
2209     MacroAssembler::Jump truncatedInt = jit.jump();
2210     
2211     tooSmall.link(&jit);
2212     jit.xorPtr(result, result);
2213     MacroAssembler::Jump zeroed = jit.jump();
2214     
2215     tooBig.link(&jit);
2216     jit.move(JITCompiler::TrustedImm32(255), result);
2217     
2218     truncatedInt.link(&jit);
2219     zeroed.link(&jit);
2220
2221 }
2222
2223 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2224 {
2225     if (node->op() == PutByValAlias)
2226         return JITCompiler::Jump();
2227     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2228         uint32_t length = view->length();
2229         Node* indexNode = m_jit.graph().child(node, 1).node();
2230         if (m_jit.graph().isInt32Constant(indexNode) && static_cast<uint32_t>(m_jit.graph().valueOfInt32Constant(indexNode)) < length)
2231             return JITCompiler::Jump();
2232         return m_jit.branch32(
2233             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2234     }
2235     return m_jit.branch32(
2236         MacroAssembler::AboveOrEqual, indexGPR,
2237         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2238 }
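// jumpForTypedArrayOutOfBounds returns an unset Jump when the bounds check can be
// skipped entirely: PutByValAlias accesses, or a constant index that is provably
// below a foldable view's length. Otherwise emitTypedArrayBoundsCheck below turns
// the jump into an OutOfBounds speculation check.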
2239
2240 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2241 {
2242     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2243     if (!jump.isSet())
2244         return;
2245     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2246 }
2247
2248 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2249 {
2250     ASSERT(isInt(type));
2251     
2252     SpeculateCellOperand base(this, node->child1());
2253     SpeculateStrictInt32Operand property(this, node->child2());
2254     StorageOperand storage(this, node->child3());
2255
2256     GPRReg baseReg = base.gpr();
2257     GPRReg propertyReg = property.gpr();
2258     GPRReg storageReg = storage.gpr();
2259
2260     GPRTemporary result(this);
2261     GPRReg resultReg = result.gpr();
2262
2263     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2264
2265     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2266     switch (elementSize(type)) {
2267     case 1:
2268         if (isSigned(type))
2269             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2270         else
2271             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2272         break;
2273     case 2:
2274         if (isSigned(type))
2275             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2276         else
2277             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2278         break;
2279     case 4:
2280         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2281         break;
2282     default:
2283         CRASH();
2284     }
2285     if (elementSize(type) < 4 || isSigned(type)) {
2286         int32Result(resultReg, node);
2287         return;
2288     }
2289     
2290     ASSERT(elementSize(type) == 4 && !isSigned(type));
2291     if (node->shouldSpeculateInt32()) {
2292         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2293         int32Result(resultReg, node);
2294         return;
2295     }
2296     
2297 #if USE(JSVALUE64)
2298     if (node->shouldSpeculateMachineInt()) {
2299         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2300         strictInt52Result(resultReg, node);
2301         return;
2302     }
2303 #endif
2304     
2305     FPRTemporary fresult(this);
2306     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2307     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2308     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2309     positive.link(&m_jit);
2310     doubleResult(fresult.fpr(), node);
2311 }
2312
2313 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2314 {
2315     ASSERT(isInt(type));
2316     
2317     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2318     GPRReg storageReg = storage.gpr();
2319     
2320     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2321     
2322     GPRTemporary value;
2323     GPRReg valueGPR = InvalidGPRReg;
2324     
2325     if (valueUse->isConstant()) {
2326         JSValue jsValue = valueOfJSConstant(valueUse.node());
2327         if (!jsValue.isNumber()) {
2328             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2329             noResult(node);
2330             return;
2331         }
2332         double d = jsValue.asNumber();
2333         if (isClamped(type)) {
2334             ASSERT(elementSize(type) == 1);
2335             d = clampDoubleToByte(d);
2336         }
2337         GPRTemporary scratch(this);
2338         GPRReg scratchReg = scratch.gpr();
2339         m_jit.move(Imm32(toInt32(d)), scratchReg);
2340         value.adopt(scratch);
2341         valueGPR = scratchReg;
2342     } else {
2343         switch (valueUse.useKind()) {
2344         case Int32Use: {
2345             SpeculateInt32Operand valueOp(this, valueUse);
2346             GPRTemporary scratch(this);
2347             GPRReg scratchReg = scratch.gpr();
2348             m_jit.move(valueOp.gpr(), scratchReg);
2349             if (isClamped(type)) {
2350                 ASSERT(elementSize(type) == 1);
2351                 compileClampIntegerToByte(m_jit, scratchReg);
2352             }
2353             value.adopt(scratch);
2354             valueGPR = scratchReg;
2355             break;
2356         }
2357             
2358 #if USE(JSVALUE64)
2359         case MachineIntUse: {
2360             SpeculateStrictInt52Operand valueOp(this, valueUse);
2361             GPRTemporary scratch(this);
2362             GPRReg scratchReg = scratch.gpr();
2363             m_jit.move(valueOp.gpr(), scratchReg);
2364             if (isClamped(type)) {
2365                 ASSERT(elementSize(type) == 1);
2366                 MacroAssembler::Jump inBounds = m_jit.branch64(
2367                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2368                 MacroAssembler::Jump tooBig = m_jit.branch64(
2369                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2370                 m_jit.move(TrustedImm32(0), scratchReg);
2371                 MacroAssembler::Jump clamped = m_jit.jump();
2372                 tooBig.link(&m_jit);
2373                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2374                 clamped.link(&m_jit);
2375                 inBounds.link(&m_jit);
2376             }
2377             value.adopt(scratch);
2378             valueGPR = scratchReg;
2379             break;
2380         }
2381 #endif // USE(JSVALUE64)
2382             
2383         case NumberUse: {
2384             if (isClamped(type)) {
2385                 ASSERT(elementSize(type) == 1);
2386                 SpeculateDoubleOperand valueOp(this, valueUse);
2387                 GPRTemporary result(this);
2388                 FPRTemporary floatScratch(this);
2389                 FPRReg fpr = valueOp.fpr();
2390                 GPRReg gpr = result.gpr();
2391                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2392                 value.adopt(result);
2393                 valueGPR = gpr;
2394             } else {
2395                 SpeculateDoubleOperand valueOp(this, valueUse);
2396                 GPRTemporary result(this);
2397                 FPRReg fpr = valueOp.fpr();
2398                 GPRReg gpr = result.gpr();
2399                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2400                 m_jit.xorPtr(gpr, gpr);
2401                 MacroAssembler::Jump fixed = m_jit.jump();
2402                 notNaN.link(&m_jit);
2403                 
2404                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2405                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2406                 
2407                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2408                 
2409                 fixed.link(&m_jit);
2410                 value.adopt(result);
2411                 valueGPR = gpr;
2412             }
2413             break;
2414         }
2415             
2416         default:
2417             RELEASE_ASSERT_NOT_REACHED();
2418             break;
2419         }
2420     }
2421     
2422     ASSERT_UNUSED(valueGPR, valueGPR != property);
2423     ASSERT(valueGPR != base);
2424     ASSERT(valueGPR != storageReg);
2425     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2426     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2427         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2428         outOfBounds = MacroAssembler::Jump();
2429     }
2430
2431     switch (elementSize(type)) {
2432     case 1:
2433         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2434         break;
2435     case 2:
2436         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2437         break;
2438     case 4:
2439         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2440         break;
2441     default:
2442         CRASH();
2443     }
2444     if (outOfBounds.isSet())
2445         outOfBounds.link(&m_jit);
2446     noResult(node);
2447 }
2448
2449 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2450 {
2451     ASSERT(isFloat(type));
2452     
2453     SpeculateCellOperand base(this, node->child1());
2454     SpeculateStrictInt32Operand property(this, node->child2());
2455     StorageOperand storage(this, node->child3());
2456
2457     GPRReg baseReg = base.gpr();
2458     GPRReg propertyReg = property.gpr();
2459     GPRReg storageReg = storage.gpr();
2460
2461     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2462
2463     FPRTemporary result(this);
2464     FPRReg resultReg = result.fpr();
2465     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2466     switch (elementSize(type)) {
2467     case 4:
2468         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2469         m_jit.convertFloatToDouble(resultReg, resultReg);
2470         break;
2471     case 8: {
2472         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2473         break;
2474     }
2475     default:
2476         RELEASE_ASSERT_NOT_REACHED();
2477     }
2478     
2479     MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2480     static const double NaN = QNaN;
2481     m_jit.loadDouble(&NaN, resultReg);
2482     notNaN.link(&m_jit);
2483     
2484     doubleResult(resultReg, node);
2485 }
2486
2487 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2488 {
2489     ASSERT(isFloat(type));
2490     
2491     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2492     GPRReg storageReg = storage.gpr();
2493     
2494     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2495     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2496
2497     SpeculateDoubleOperand valueOp(this, valueUse);
2498     FPRTemporary scratch(this);
2499     FPRReg valueFPR = valueOp.fpr();
2500     FPRReg scratchFPR = scratch.fpr();
2501
2502     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2503     
2504     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2505     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2506         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2507         outOfBounds = MacroAssembler::Jump();
2508     }
2509     
2510     switch (elementSize(type)) {
2511     case 4: {
2512         m_jit.moveDouble(valueFPR, scratchFPR);
2513         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2514         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2515         break;
2516     }
2517     case 8:
2518         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2519         break;
2520     default:
2521         RELEASE_ASSERT_NOT_REACHED();
2522     }
2523     if (outOfBounds.isSet())
2524         outOfBounds.link(&m_jit);
2525     noResult(node);
2526 }
2527
2528 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2529 {
2530     // Check that prototype is an object.
2531     m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2532     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2533     
2534     // Initialize scratchReg with the value being checked.
2535     m_jit.move(valueReg, scratchReg);
2536     
2537     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2538     MacroAssembler::Label loop(&m_jit);
2539     m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
2540 #if USE(JSVALUE64)
2541     m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2542     MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2543     m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
2544 #else
2545     m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2546     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2547     m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2548 #endif
2549     
2550     // No match - result is false.
2551 #if USE(JSVALUE64)
2552     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2553 #else
2554     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2555 #endif
2556     MacroAssembler::Jump putResult = m_jit.jump();
2557     
2558     isInstance.link(&m_jit);
2559 #if USE(JSVALUE64)
2560     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2561 #else
2562     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2563 #endif
2564     
2565     putResult.link(&m_jit);
2566 }
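// Roughly, the code emitted above behaves like the following sketch (JSValue
// encoding details omitted; names are illustrative only):
//
//     JSValue current = value;
//     for (;;) {
//         current = current.asCell()->structure()->storedPrototype();
//         if (current == prototype)
//             return true;  // isInstance
//         if (!current.isCell())
//             return false; // reached the end of the prototype chain
//     }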
2567
2568 void SpeculativeJIT::compileInstanceOf(Node* node)
2569 {
2570     if (node->child1().useKind() == UntypedUse) {
2571         // It might not be a cell. Speculate less aggressively.
2572         // Or: it might only be used once (i.e. by us), so we get zero benefit
2573         // from speculating any more aggressively than we absolutely need to.
2574         
2575         JSValueOperand value(this, node->child1());
2576         SpeculateCellOperand prototype(this, node->child2());
2577         GPRTemporary scratch(this);
2578         
2579         GPRReg prototypeReg = prototype.gpr();
2580         GPRReg scratchReg = scratch.gpr();
2581         
2582 #if USE(JSVALUE64)
2583         GPRReg valueReg = value.gpr();
2584         MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2585         m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2586 #else
2587         GPRReg valueTagReg = value.tagGPR();
2588         GPRReg valueReg = value.payloadGPR();
2589         MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2590         m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2591 #endif
2592
2593         MacroAssembler::Jump done = m_jit.jump();
2594         
2595         isCell.link(&m_jit);
2596         
2597         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2598         
2599         done.link(&m_jit);
2600
2601 #if USE(JSVALUE64)
2602         jsValueResult(scratchReg, node, DataFormatJSBoolean);
2603 #else
2604         booleanResult(scratchReg, node);
2605 #endif
2606         return;
2607     }
2608     
2609     SpeculateCellOperand value(this, node->child1());
2610     SpeculateCellOperand prototype(this, node->child2());
2611     
2612     GPRTemporary scratch(this);
2613     
2614     GPRReg valueReg = value.gpr();
2615     GPRReg prototypeReg = prototype.gpr();
2616     GPRReg scratchReg = scratch.gpr();
2617     
2618     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2619
2620 #if USE(JSVALUE64)
2621     jsValueResult(scratchReg, node, DataFormatJSBoolean);
2622 #else
2623     booleanResult(scratchReg, node);
2624 #endif
2625 }
2626
2627 void SpeculativeJIT::compileAdd(Node* node)
2628 {
2629     switch (node->binaryUseKind()) {
2630     case Int32Use: {
2631         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2632         
2633         if (isInt32Constant(node->child1().node())) {
2634             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2635             SpeculateInt32Operand op2(this, node->child2());
2636             GPRTemporary result(this);
2637
2638             if (!shouldCheckOverflow(node->arithMode())) {
2639                 m_jit.move(op2.gpr(), result.gpr());
2640                 m_jit.add32(Imm32(imm1), result.gpr());
2641             } else
2642                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2643
2644             int32Result(result.gpr(), node);
2645             return;
2646         }
2647         
2648         if (isInt32Constant(node->child2().node())) {
2649             SpeculateInt32Operand op1(this, node->child1());
2650             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2651             GPRTemporary result(this);
2652                 
2653             if (!shouldCheckOverflow(node->arithMode())) {
2654                 m_jit.move(op1.gpr(), result.gpr());
2655                 m_jit.add32(Imm32(imm2), result.gpr());
2656             } else
2657                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2658
2659             int32Result(result.gpr(), node);
2660             return;
2661         }
2662                 
2663         SpeculateInt32Operand op1(this, node->child1());
2664         SpeculateInt32Operand op2(this, node->child2());
2665         GPRTemporary result(this, Reuse, op1, op2);
2666
2667         GPRReg gpr1 = op1.gpr();
2668         GPRReg gpr2 = op2.gpr();
2669         GPRReg gprResult = result.gpr();
2670
2671         if (!shouldCheckOverflow(node->arithMode())) {
2672             if (gpr1 == gprResult)
2673                 m_jit.add32(gpr2, gprResult);
2674             else {
2675                 m_jit.move(gpr2, gprResult);
2676                 m_jit.add32(gpr1, gprResult);
2677             }
2678         } else {
2679             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2680                 
2681             if (gpr1 == gprResult)
2682                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2683             else if (gpr2 == gprResult)
2684                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2685             else
2686                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2687         }
2688
2689         int32Result(gprResult, node);
2690         return;
2691     }
2692         
2693 #if USE(JSVALUE64)
2694     case MachineIntUse: {
2695         ASSERT(shouldCheckOverflow(node->arithMode()));
2696         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2697
2698         // Will we need an overflow check? If we can prove that neither input can be
2699         // Int52 then the overflow check will not be necessary.
2700         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2701             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2702             SpeculateWhicheverInt52Operand op1(this, node->child1());
2703             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2704             GPRTemporary result(this, Reuse, op1);
2705             m_jit.move(op1.gpr(), result.gpr());
2706             m_jit.add64(op2.gpr(), result.gpr());
2707             int52Result(result.gpr(), node, op1.format());
2708             return;
2709         }
2710         
2711         SpeculateInt52Operand op1(this, node->child1());
2712         SpeculateInt52Operand op2(this, node->child2());
2713         GPRTemporary result(this);
2714         m_jit.move(op1.gpr(), result.gpr());
2715         speculationCheck(
2716             Int52Overflow, JSValueRegs(), 0,
2717             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2718         int52Result(result.gpr(), node);
2719         return;
2720     }
2721 #endif // USE(JSVALUE64)
2722     
2723     case NumberUse: {
2724         SpeculateDoubleOperand op1(this, node->child1());
2725         SpeculateDoubleOperand op2(this, node->child2());
2726         FPRTemporary result(this, op1, op2);
2727
2728         FPRReg reg1 = op1.fpr();
2729         FPRReg reg2 = op2.fpr();
2730         m_jit.addDouble(reg1, reg2, result.fpr());
2731
2732         doubleResult(result.fpr(), node);
2733         return;
2734     }
2735         
2736     default:
2737         RELEASE_ASSERT_NOT_REACHED();
2738         break;
2739     }
2740 }
2741
2742 void SpeculativeJIT::compileMakeRope(Node* node)
2743 {
2744     ASSERT(node->child1().useKind() == KnownStringUse);
2745     ASSERT(node->child2().useKind() == KnownStringUse);
2746     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2747     
2748     SpeculateCellOperand op1(this, node->child1());
2749     SpeculateCellOperand op2(this, node->child2());
2750     SpeculateCellOperand op3(this, node->child3());
2751     GPRTemporary result(this);
2752     GPRTemporary allocator(this);
2753     GPRTemporary scratch(this);
2754     
2755     GPRReg opGPRs[3];
2756     unsigned numOpGPRs;
2757     opGPRs[0] = op1.gpr();
2758     opGPRs[1] = op2.gpr();
2759     if (node->child3()) {
2760         opGPRs[2] = op3.gpr();
2761         numOpGPRs = 3;
2762     } else {
2763         opGPRs[2] = InvalidGPRReg;
2764         numOpGPRs = 2;
2765     }
2766     GPRReg resultGPR = result.gpr();
2767     GPRReg allocatorGPR = allocator.gpr();
2768     GPRReg scratchGPR = scratch.gpr();
2769     
2770     JITCompiler::JumpList slowPath;
2771     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2772     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2773     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2774         
2775     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2776     for (unsigned i = 0; i < numOpGPRs; ++i)
2777         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2778     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2779         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
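         // The rope is 8-bit only if every fiber is 8-bit, and its length is the sum of the
         // fiber lengths: scratchGPR accumulates the AND of the flags, and allocatorGPR, no
         // longer needed for allocation, accumulates the length.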
2780     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2781     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2782     for (unsigned i = 1; i < numOpGPRs; ++i) {
2783         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2784         m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
2785     }
2786     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2787     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2788     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2789     
2790     switch (numOpGPRs) {
2791     case 2:
2792         addSlowPathGenerator(slowPathCall(
2793             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2794         break;
2795     case 3:
2796         addSlowPathGenerator(slowPathCall(
2797             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2798         break;
2799     default:
2800         RELEASE_ASSERT_NOT_REACHED();
2801         break;
2802     }
2803         
2804     cellResult(resultGPR, node);
2805 }
2806
2807 void SpeculativeJIT::compileArithSub(Node* node)
2808 {
2809     switch (node->binaryUseKind()) {
2810     case Int32Use: {
2811         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2812         
2813         if (isNumberConstant(node->child2().node())) {
2814             SpeculateInt32Operand op1(this, node->child1());
2815             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2816             GPRTemporary result(this);
2817
2818             if (!shouldCheckOverflow(node->arithMode())) {
2819                 m_jit.move(op1.gpr(), result.gpr());
2820                 m_jit.sub32(Imm32(imm2), result.gpr());
2821             } else {
2822                 GPRTemporary scratch(this);
2823                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2824             }
2825
2826             int32Result(result.gpr(), node);
2827             return;
2828         }
2829             
2830         if (isNumberConstant(node->child1().node())) {
2831             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2832             SpeculateInt32Operand op2(this, node->child2());
2833             GPRTemporary result(this);
2834                 
2835             m_jit.move(Imm32(imm1), result.gpr());
2836             if (!shouldCheckOverflow(node->arithMode()))
2837                 m_jit.sub32(op2.gpr(), result.gpr());
2838             else
2839                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2840                 
2841             int32Result(result.gpr(), node);
2842             return;
2843         }
2844             
2845         SpeculateInt32Operand op1(this, node->child1());
2846         SpeculateInt32Operand op2(this, node->child2());
2847         GPRTemporary result(this);
2848
2849         if (!shouldCheckOverflow(node->arithMode())) {
2850             m_jit.move(op1.gpr(), result.gpr());
2851             m_jit.sub32(op2.gpr(), result.gpr());
2852         } else
2853             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2854
2855         int32Result(result.gpr(), node);
2856         return;
2857     }
2858         
2859 #if USE(JSVALUE64)
2860     case MachineIntUse: {
2861         ASSERT(shouldCheckOverflow(node->arithMode()));
2862         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2863
2864         // Will we need an overflow check? If we can prove that neither input can be
2865         // Int52 then the overflow check will not be necessary.
2866         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2867             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2868             SpeculateWhicheverInt52Operand op1(this, node->child1());
2869             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2870             GPRTemporary result(this, Reuse, op1);
2871             m_jit.move(op1.gpr(), result.gpr());
2872             m_jit.sub64(op2.gpr(), result.gpr());
2873             int52Result(result.gpr(), node, op1.format());
2874             return;
2875         }
2876         
2877         SpeculateInt52Operand op1(this, node->child1());
2878         SpeculateInt52Operand op2(this, node->child2());
2879         GPRTemporary result(this);
2880         m_jit.move(op1.gpr(), result.gpr());
2881         speculationCheck(
2882             Int52Overflow, JSValueRegs(), 0,
2883             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2884         int52Result(result.gpr(), node);
2885         return;
2886     }
2887 #endif // USE(JSVALUE64)
2888
2889     case NumberUse: {
2890         SpeculateDoubleOperand op1(this, node->child1());
2891         SpeculateDoubleOperand op2(this, node->child2());
2892         FPRTemporary result(this, op1);
2893
2894         FPRReg reg1 = op1.fpr();
2895         FPRReg reg2 = op2.fpr();
2896         m_jit.subDouble(reg1, reg2, result.fpr());
2897
2898         doubleResult(result.fpr(), node);
2899         return;
2900     }
2901         
2902     default:
2903         RELEASE_ASSERT_NOT_REACHED();
2904         return;
2905     }
2906 }
2907
2908 void SpeculativeJIT::compileArithNegate(Node* node)
2909 {
2910     switch (node->child1().useKind()) {
2911     case Int32Use: {
2912         SpeculateInt32Operand op1(this, node->child1());
2913         GPRTemporary result(this);
2914
2915         m_jit.move(op1.gpr(), result.gpr());
2916
2917         // Note: there is no arith mode in which the result is not used as a number
2918         // but someone still cares about negative zero.
2919         
2920         if (!shouldCheckOverflow(node->arithMode()))
2921             m_jit.neg32(result.gpr());
2922         else if (!shouldCheckNegativeZero(node->arithMode()))
2923             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
2924         else {
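                 // value & 0x7fffffff is zero only for 0 and -2^31: negating 0 would
                 // produce negative zero and negating -2^31 would overflow, so bail on both.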
2925             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
2926             m_jit.neg32(result.gpr());
2927         }
2928
2929         int32Result(result.gpr(), node);
2930         return;
2931     }
2932
2933 #if USE(JSVALUE64)
2934     case MachineIntUse: {
2935         ASSERT(shouldCheckOverflow(node->arithMode()));
2936         
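             // If the operand is provably within the int32 range, a 64-bit negation cannot
             // overflow (even -2^31 negates cleanly), so only negative zero needs a check.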
2937         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
2938             SpeculateWhicheverInt52Operand op1(this, node->child1());
2939             GPRTemporary result(this);
2940             GPRReg op1GPR = op1.gpr();
2941             GPRReg resultGPR = result.gpr();
2942             m_jit.move(op1GPR, resultGPR);
2943             m_jit.neg64(resultGPR);
2944             if (shouldCheckNegativeZero(node->arithMode())) {
2945                 speculationCheck(
2946                     NegativeZero, JSValueRegs(), 0,
2947                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2948             }
2949             int52Result(resultGPR, node, op1.format());
2950             return;
2951         }
2952         
2953         SpeculateInt52Operand op1(this, node->child1());
2954         GPRTemporary result(this);
2955         GPRReg op1GPR = op1.gpr();
2956         GPRReg resultGPR = result.gpr();
2957         m_jit.move(op1GPR, resultGPR);
2958         speculationCheck(
2959             Int52Overflow, JSValueRegs(), 0,
2960             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
2961         if (shouldCheckNegativeZero(node->arithMode())) {
2962             speculationCheck(
2963                 NegativeZero, JSValueRegs(), 0,
2964                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
2965         }
2966         int52Result(resultGPR, node);
2967         return;
2968     }
2969 #endif // USE(JSVALUE64)
2970         
2971     case NumberUse: {
2972         SpeculateDoubleOperand op1(this, node->child1());
2973         FPRTemporary result(this);
2974         
2975         m_jit.negateDouble(op1.fpr(), result.fpr());
2976         
2977         doubleResult(result.fpr(), node);
2978         return;
2979     }
2980         
2981     default:
2982         RELEASE_ASSERT_NOT_REACHED();
2983         return;
2984     }
2985 }
2986 void SpeculativeJIT::compileArithMul(Node* node)
2987 {
2988     switch (node->binaryUseKind()) {
2989     case Int32Use: {
2990         SpeculateInt32Operand op1(this, node->child1());
2991         SpeculateInt32Operand op2(this, node->child2());
2992         GPRTemporary result(this);
2993
2994         GPRReg reg1 = op1.gpr();
2995         GPRReg reg2 = op2.gpr();
2996
2997         // We can perform truncated multiplications if we get to this point, because if the
2998         // fixup phase could not prove that it would be safe, it would have turned us into
2999         // a double multiplication.
3000         if (!shouldCheckOverflow(node->arithMode())) {
3001             m_jit.move(reg1, result.gpr());
3002             m_jit.mul32(reg2, result.gpr());
3003         } else {
3004             speculationCheck(
3005                 Overflow, JSValueRegs(), 0,
3006                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3007         }
3008             
3009         // Check for negative zero, if the users of this node care about such things.
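             // A zero product is negative zero exactly when one factor is zero and the
             // other is negative, so once the result is known to be zero we bail if either
             // input is negative.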
3010         if (shouldCheckNegativeZero(node->arithMode())) {
3011             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3012             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3013             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3014             resultNonZero.link(&m_jit);
3015         }
3016
3017         int32Result(result.gpr(), node);
3018         return;
3019     }
3020     
3021 #if USE(JSVALUE64)   
3022     case MachineIntUse: {
3023         ASSERT(shouldCheckOverflow(node->arithMode()));
3024         
3025         // This is super clever. We want to do an int52 multiplication and check the
3026         // int52 overflow bit. There is no direct hardware support for this, but we do
3027         // have the ability to do an int64 multiplication and check the int64 overflow
3028         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3029         // registers, with the high 12 bits being sign-extended. We can do:
3030         //
3031         //     (a * (b << 12))
3032         //
3033         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3034         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3035         // multiplication overflows is identical to whether the 'a * b' 52-bit
3036         // multiplication overflows.
3037         //
3038         // In our nomenclature, this is:
3039         //
3040         //     strictInt52(a) * int52(b) => int52
3041         //
3042         // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3043         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3044         //
3045         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3046         // we just do whatever is more convenient for op1 and have op2 do the
3047         // opposite. This ensures that we do at most one shift.
3048
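             // For example, with a = b = 2^26 the true product 2^52 is outside the int52
             // range [-2^51, 2^51 - 1], and the shifted multiply computes 2^26 * 2^38 = 2^64,
             // which trips the int64 overflow flag; with a = b = 2^25 the product 2^50 is in
             // range and the shifted multiply yields 2^62, which does not overflow.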
3049         SpeculateWhicheverInt52Operand op1(this, node->child1());
3050         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3051         GPRTemporary result(this);
3052         
3053         GPRReg op1GPR = op1.gpr();
3054         GPRReg op2GPR = op2.gpr();
3055         GPRReg resultGPR = result.gpr();
3056         
3057         m_jit.move(op1GPR, resultGPR);
3058         speculationCheck(
3059             Int52Overflow, JSValueRegs(), 0,
3060             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3061         
3062         if (shouldCheckNegativeZero(node->arithMode())) {
3063             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3064                 MacroAssembler::NonZero, resultGPR);
3065             speculationCheck(
3066                 NegativeZero, JSValueRegs(), 0,
3067                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3068             speculationCheck(
3069                 NegativeZero, JSValueRegs(), 0,
3070                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3071             resultNonZero.link(&m_jit);
3072         }
3073         
3074         int52Result(resultGPR, node);
3075         return;
3076     }
3077 #endif // USE(JSVALUE64)
3078         
3079     case NumberUse: {
3080         SpeculateDoubleOperand op1(this, node->child1());
3081         SpeculateDoubleOperand op2(this, node->child2());
3082         FPRTemporary result(this, op1, op2);
3083         
3084         FPRReg reg1 = op1.fpr();
3085         FPRReg reg2 = op2.fpr();
3086         
3087         m_jit.mulDouble(reg1, reg2, result.fpr());
3088         
3089         doubleResult(result.fpr(), node);
3090         return;
3091     }
3092         
3093     default:
3094         RELEASE_ASSERT_NOT_REACHED();
3095         return;
3096     }
3097 }
3098
3099 void SpeculativeJIT::compileArithDiv(Node* node)
3100 {
3101     switch (node->binaryUseKind()) {
3102     case Int32Use: {
3103 #if CPU(X86) || CPU(X86_64)
3104         SpeculateInt32Operand op1(this, node->child1());
3105         SpeculateInt32Operand op2(this, node->child2());
3106         GPRTemporary eax(this, X86Registers::eax);
3107         GPRTemporary edx(this, X86Registers::edx);
3108         GPRReg op1GPR = op1.gpr();
3109         GPRReg op2GPR = op2.gpr();
3110     
3111         GPRReg op2TempGPR;
3112         GPRReg temp;
3113         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3114             op2TempGPR = allocate();
3115             temp = op2TempGPR;
3116         } else {
3117             op2TempGPR = InvalidGPRReg;
3118             if (op1GPR == X86Registers::eax)
3119                 temp = X86Registers::edx;
3120             else
3121                 temp = X86Registers::eax;
3122         }
3123     
3124         ASSERT(temp != op1GPR);
3125         ASSERT(temp != op2GPR);
3126     
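             // (unsigned)(op2 + 1) > 1 holds exactly when op2 is neither 0 nor -1; any other
             // denominator jumps ahead to safeDenominator and skips the special cases below.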
3127         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3128     
3129         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3130     
3131         JITCompiler::JumpList done;
3132         if (shouldCheckOverflow(node->arithMode())) {
3133             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3134             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3135         } else {
3136             // This is the case where we convert the result to an int after we're done, and we
3137             // already know that the denominator is either -1 or 0. So, if the denominator is
3138             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3139             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3140             // are happy to fall through to a normal division, since we're just dividing
3141             // something by negative 1.
3142         
3143             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3144             m_jit.move(TrustedImm32(0), eax.gpr());
3145             done.append(m_jit.jump());
3146         
3147             notZero.link(&m_jit);
3148             JITCompiler::Jump notNeg2ToThe31 =
3149                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3150             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3151             done.append(m_jit.jump());
3152         
3153             notNeg2ToThe31.link(&m_jit);
3154         }
3155     
3156         safeDenominator.link(&m_jit);
3157     
3158         // If the user cares about negative zero, then speculate that we're not about
3159         // to produce negative zero.
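             // A negative zero result requires a zero numerator and a negative divisor,
             // which is why only those two conditions are tested.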
3160         if (shouldCheckNegativeZero(node->arithMode())) {
3161             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3162             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3163             numeratorNonZero.link(&m_jit);
3164         }
3165     
3166         if (op2TempGPR != InvalidGPRReg) {
3167             m_jit.move(op2GPR, op2TempGPR);
3168             op2GPR = op2TempGPR;
3169         }
3170             
3171         m_jit.move(op1GPR, eax.gpr());
3172         m_jit.assembler().cdq();
3173         m_jit.assembler().idivl_r(op2GPR);
3174             
3175         if (op2TempGPR != InvalidGPRReg)
3176             unlock(op2TempGPR);
3177
3178         // Check that there was no remainder. If there had been, then we'd be obligated to
3179         // produce a double result instead.
3180         if (shouldCheckOverflow(node->arithMode()))
3181             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3182         
3183         done.link(&m_jit);
3184         int32Result(eax.gpr(), node);
3185 #elif CPU(APPLE_ARMV7S)
3186         SpeculateInt32Operand op1(this, node->child1());
3187         SpeculateInt32Operand op2(this, node->child2());
3188         GPRReg op1GPR = op1.gpr();
3189         GPRReg op2GPR = op2.gpr();
3190         GPRTemporary quotient(this);
3191         GPRTemporary multiplyAnswer(this);
3192
3193         // If the user cares about negative zero, then speculate that we're not about
3194         // to produce negative zero.
3195         if (shouldCheckNegativeZero(node->arithMode())) {
3196             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3197             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3198             numeratorNonZero.link(&m_jit);
3199         }
3200
3201         m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3202
3203         // Check that there was no remainder. If there had been, then we'd be obligated to
3204         // produce a double result instead.
3205         if (shouldCheckOverflow(node->arithMode())) {
3206             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3207             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3208         }
3209
3210         int32Result(quotient.gpr(), node);
3211 #elif CPU(ARM64)
3212         SpeculateInt32Operand op1(this, node->child1());
3213         SpeculateInt32Operand op2(this, node->child2());
3214         GPRReg op1GPR = op1.gpr();
3215         GPRReg op2GPR = op2.gpr();
3216         GPRTemporary quotient(this);
3217         GPRTemporary multiplyAnswer(this);
3218
3219         // If the user cares about negative zero, then speculate that we're not about
3220         // to produce negative zero.
3221         if (shouldCheckNegativeZero(node->arithMode())) {
3222             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3223             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3224             numeratorNonZero.link(&m_jit);
3225         }
3226
3227         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3228
3229         // Check that there was no remainder. If there had been, then we'd be obligated to
3230         // produce a double result instead.
3231         if (shouldCheckOverflow(node->arithMode())) {
3232             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3233             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3234         }
3235
3236         int32Result(quotient.gpr(), node);
3237 #else
3238         RELEASE_ASSERT_NOT_REACHED();
3239 #endif
3240         break;
3241     }
3242         
3243     case NumberUse: {
3244         SpeculateDoubleOperand op1(this, node->child1());
3245         SpeculateDoubleOperand op2(this, node->child2());
3246         FPRTemporary result(this, op1);
3247         
3248         FPRReg reg1 = op1.fpr();
3249         FPRReg reg2 = op2.fpr();
3250         m_jit.divDouble(reg1, reg2, result.fpr());
3251         
3252         doubleResult(result.fpr(), node);
3253         break;
3254     }
3255         
3256     default:
3257         RELEASE_ASSERT_NOT_REACHED();
3258         break;
3259     }
3260 }
3261
3262 void SpeculativeJIT::compileArithMod(Node* node)
3263 {
3264     switch (node->binaryUseKind()) {
3265     case Int32Use: {
3266         // In the fast path, the dividend value could be the final result
3267         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3268         SpeculateStrictInt32Operand op1(this, node->child1());
3269         
3270         if (isInt32Constant(node->child2().node())) {
3271             int32_t divisor = valueOfInt32Constant(node->child2().node());
3272             if (divisor > 1 && hasOneBitSet(divisor)) {
3273                 unsigned logarithm = WTF::fastLog2(divisor);
3274                 GPRReg dividendGPR = op1.gpr();
3275                 GPRTemporary result(this);
3276                 GPRReg resultGPR = result.gpr();
3277
3278                 // This is what LLVM generates. It's pretty crazy. Here's my
3279                 // attempt at understanding it.
3280                 
3281                 // First, compute either divisor - 1, or 0, depending on whether
3282                 // the dividend is negative:
3283                 //
3284                 // If dividend < 0:  resultGPR = divisor - 1
3285                 // If dividend >= 0: resultGPR = 0
3286                 m_jit.move(dividendGPR, resultGPR);
3287                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3288                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3289                 
3290                 // Add in the dividend, so that:
3291                 //
3292                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3293                 // If dividend >= 0: resultGPR = dividend
3294                 m_jit.add32(dividendGPR, resultGPR);
3295                 
3296                 // Mask so as to only get the *high* bits. This rounds down
3297                 // (towards negative infinity) resultGPR to the nearest multiple
3298                 // of divisor, so that:
3299                 //
3300                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3301                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3302                 //
3303                 // Note that this can be simplified to:
3304                 //
3305                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3306                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3307                 //
3308                 // Note that if the dividend is negative, resultGPR will also be negative.
3309                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3310                 // zero, because of how things are conditionalized.
3311                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3312                 
3313                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3314                 //
3315                 // resultGPR = dividendGPR - resultGPR
3316                 m_jit.neg32(resultGPR);
3317                 m_jit.add32(dividendGPR, resultGPR);
3318                 
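                     // Worked example with divisor = 4 (logarithm = 2): for dividend = -5
                     // the shifts produce 3, adding the dividend gives -2, masking with -4
                     // rounds to -4, and -5 - (-4) = -1, matching -5 % 4; for dividend = 7
                     // the shifts produce 0, 7 & -4 = 4, and 7 - 4 = 3.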
3319                 if (shouldCheckNegativeZero(node->arithMode())) {
3320                     // Check that we're not about to create negative zero.
3321                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3322                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3323                     numeratorPositive.link(&m_jit);
3324                 }
3325
3326                 int32Result(resultGPR, node);
3327                 return;
3328             }
3329         }
3330         
3331 #if CPU(X86) || CPU(X86_64)
3332         if (isInt32Constant(node->child2().node())) {
3333             int32_t divisor = valueOfInt32Constant(node->child2().node());
3334             if (divisor && divisor != -1) {
3335                 GPRReg op1Gpr = op1.gpr();
3336
3337                 GPRTemporary eax(this, X86Registers::eax);
3338                 GPRTemporary edx(this, X86Registers::edx);
3339                 GPRTemporary scratch(this);
3340                 GPRReg scratchGPR = scratch.gpr();
3341
3342                 GPRReg op1SaveGPR;
3343                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3344                     op1SaveGPR = allocate();
3345                     ASSERT(op1Gpr != op1SaveGPR);
3346                     m_jit.move(op1Gpr, op1SaveGPR);
3347                 } else
3348                     op1SaveGPR = op1Gpr;
3349                 ASSERT(op1SaveGPR != X86Registers::eax);
3350                 ASSERT(op1SaveGPR != X86Registers::edx);
3351
3352                 m_jit.move(op1Gpr, eax.gpr());
3353                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3354                 m_jit.assembler().cdq();
3355                 m_jit.assembler().idivl_r(scratchGPR);
3356                 if (shouldCheckNegativeZero(node->arithMode())) {
3357                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3358                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3359                     numeratorPositive.link(&m_jit);
3360                 }
3361             
3362                 if (op1SaveGPR != op1Gpr)
3363                     unlock(op1SaveGPR);
3364
3365                 int32Result(edx.gpr(), node);
3366                 return;
3367             }
3368         }
3369 #endif
3370
3371         SpeculateInt32Operand op2(this, node->child2());
3372 #if CPU(X86) || CPU(X86_64)
3373         GPRTemporary eax(this, X86Registers::eax);
3374         GPRTemporary edx(this, X86Registers::edx);
3375         GPRReg op1GPR = op1.gpr();
3376         GPRReg op2GPR = op2.gpr();
3377     
3378         GPRReg op2TempGPR;
3379         GPRReg temp;
3380         GPRReg op1SaveGPR;
3381     
3382         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3383             op2TempGPR = allocate();
3384             temp = op2TempGPR;
3385         } else {
3386             op2TempGPR = InvalidGPRReg;
3387             if (op1GPR == X86Registers::eax)
3388                 temp = X86Registers::edx;
3389             else
3390                 temp = X86Registers::eax;
3391         }
3392     
3393         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3394             op1SaveGPR = allocate();
3395             ASSERT(op1GPR != op1SaveGPR);
3396             m_jit.move(op1GPR, op1SaveGPR);
3397         } else
3398             op1SaveGPR = op1GPR;
3399     
3400         ASSERT(temp != op1GPR);
3401         ASSERT(temp != op2GPR);
3402         ASSERT(op1SaveGPR != X86Registers::eax);
3403         ASSERT(op1SaveGPR != X86Registers::edx);
3404     
3405         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3406     
3407         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3408     
3409         JITCompiler::JumpList done;
3410         
3411         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3412         // separate case for that. But it probably doesn't matter so much.
3413         if (shouldCheckOverflow(node->arithMode())) {
3414             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3415             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3416         } else {
3417             // This is the case where we convert the result to an int after we're done, and we
3418             // already know that the denominator is either -1 or 0. So, if the denominator is
3419             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3420             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3421             // happy to fall through to a normal division, since we're just dividing something
3422             // by negative 1.
3423         
3424             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3425             m_jit.move(TrustedImm32(0), edx.gpr());
3426             done.append(m_jit.jump());
3427         
3428             notZero.link(&m_jit);
3429             JITCompiler::Jump notNeg2ToThe31 =
3430                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3431             m_jit.move(TrustedImm32(0), edx.gpr());
3432             done.append(m_jit.jump());
3433         
3434             notNeg2ToThe31.link(&m_jit);
3435         }
3436         
3437         safeDenominator.link(&m_jit);
3438             
3439         if (op2TempGPR != InvalidGPRReg) {
3440             m_jit.move(op2GPR, op2TempGPR);
3441             op2GPR = op2TempGPR;
3442         }
3443             
3444         m_jit.move(op1GPR, eax.gpr());
3445         m_jit.assembler().cdq();
3446         m_jit.assembler().idivl_r(op2GPR);
3447             
3448         if (op2TempGPR != InvalidGPRReg)
3449             unlock(op2TempGPR);
3450
3451         // Check that we're not about to create negative zero.
3452         if (shouldCheckNegativeZero(node->arithMode())) {
3453             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3454             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3455             numeratorPositive.link(&m_jit);
3456         }
3457     
3458         if (op1SaveGPR != op1GPR)
3459             unlock(op1SaveGPR);
3460             
3461         done.link(&m_jit);
3462         int32Result(edx.gpr(), node);
3463
3464 #elif CPU(APPLE_ARMV7S)
3465         GPRTemporary temp(this);
3466         GPRTemporary quotientThenRemainder(this);
3467         GPRTemporary multiplyAnswer(this);
3468         GPRReg dividendGPR = op1.gpr();
3469         GPRReg divisorGPR = op2.gpr();
3470         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3471         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3472
3473         JITCompiler::JumpList done;
3474         
3475         if (shouldCheckOverflow(node->arithMode()))
3476             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3477         else {
3478             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3479             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3480             done.append(m_jit.jump());
3481             denominatorNotZero.link(&m_jit);
3482         }
3483
3484         m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3485         // FIXME: It seems like there are cases where we don't need this? What if we have
3486         // arithMode() == Arith::Unchecked?
3487         // https://bugs.webkit.org/show_bug.cgi?id=126444
3488         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3489         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3490
3491         // If the user cares about negative zero, then speculate that we're not about
3492         // to produce negative zero.
3493         if (shouldCheckNegativeZero(node->arithMode())) {
3494             // Check that we're not about to create negative zero.
3495             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3496             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3497             numeratorPositive.link(&m_jit);
3498         }
3499
3500         done.link(&m_jit);
3501         
3502         int32Result(quotientThenRemainderGPR, node);
3503 #elif CPU(ARM64)
3504         GPRTemporary temp(this);
3505         GPRTemporary quotientThenRemainder(this);
3506         GPRTemporary multiplyAnswer(this);
3507         GPRReg dividendGPR = op1.gpr();
3508         GPRReg divisorGPR = op2.gpr();
3509         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3510         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3511
3512         JITCompiler::JumpList done;
3513     
3514         if (shouldCheckOverflow(node->arithMode()))
3515             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3516         else {
3517             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3518             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3519             done.append(m_jit.jump());
3520             denominatorNotZero.link(&m_jit);
3521         }
3522
3523         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3524         // FIXME: It seems like there are cases where we don't need this? What if we have
3525         // arithMode() == Arith::Unchecked?
3526         // https://bugs.webkit.org/show_bug.cgi?id=126444
3527         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3528         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3529
3530         // If the user cares about negative zero, then speculate that we're not about
3531         // to produce negative zero.
3532         if (shouldCheckNegativeZero(node->arithMode())) {
3533             // Check that we're not about to create negative zero.
3534             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3535             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3536             numeratorPositive.link(&m_jit);
3537         }
3538
3539         done.link(&m_jit);
3540
3541         int32Result(quotientThenRemainderGPR, node);
3542 #else // not an architecture that can do integer division
3543         RELEASE_ASSERT_NOT_REACHED();
3544 #endif
3545         return;
3546     }
3547         
3548     case NumberUse: {
3549         SpeculateDoubleOperand op1(this, node->child1());
3550         SpeculateDoubleOperand op2(this, node->child2());
3551         
3552         FPRReg op1FPR = op1.fpr();
3553         FPRReg op2FPR = op2.fpr();
3554         
3555         flushRegisters();
3556         
3557         FPRResult result(this);
3558         
3559         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3560         
3561         doubleResult(result.fpr(), node);
3562         return;
3563     }
3564         
3565     default:
3566         RELEASE_ASSERT_NOT_REACHED();
3567         return;
3568     }
3569 }
3570
3571 // Returns true if the compare is fused with a subsequent branch.
3572 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3573 {
3574     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3575         return true;
3576
3577     if (node->isBinaryUseKind(Int32Use)) {
3578         compileInt32Compare(node, condition);
3579         return false;
3580     }
3581     
3582 #if USE(JSVALUE64)
3583     if (node->isBinaryUseKind(MachineIntUse)) {
3584         compileInt52Compare(node, condition);
3585         return false;
3586     }
3587 #endif // USE(JSVALUE64)
3588     
3589     if (node->isBinaryUseKind(NumberUse)) {
3590         compileDoubleCompare(node, doubleCondition);
3591         return false;
3592     }
3593     
3594     if (node->op() == CompareEq) {
3595         if (node->isBinaryUseKind(StringUse)) {
3596             compileStringEquality(node);
3597             return false;
3598         }
3599         
3600         if (node->isBinaryUseKind(BooleanUse)) {
3601             compileBooleanCompare(node, condition);
3602             return false;
3603         }
3604
3605         if (node->isBinaryUseKind(StringIdentUse)) {
3606             compileStringIdentEquality(node);
3607             return false;
3608         }
3609         
3610         if (node->isBinaryUseKind(ObjectUse)) {
3611             compileObjectEquality(node);
3612             return false;
3613         }
3614         
3615         if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
3616             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3617             return false;
3618         }
3619         
3620         if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
3621             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3622             return false;
3623         }
3624     }
3625     
3626     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3627     return false;
3628 }
3629
3630 bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
3631 {
3632     JSValueOperand op1(this, value);
3633     
3634     // FIXME: This code is wrong for the case that the constant is null or undefined,
3635     // and the value is an object that MasqueradesAsUndefined.
3636     // https://bugs.webkit.org/show_bug.cgi?id=109487
3637     
3638     unsigned branchIndexInBlock = detectPeepHoleBranch();
3639     if (branchIndexInBlock != UINT_MAX) {
3640         Node* branchNode = m_block->at(branchIndexInBlock);
3641         BasicBlock* taken = branchNode->takenBlock();
3642         BasicBlock* notTaken = branchNode->notTakenBlock();
3643         MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
3644         
3645         // The branch instruction will branch to the taken block.
3646         // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
3647         if (taken == nextBlock()) {
3648             condition = MacroAssembler::NotEqual;
3649             BasicBlock* tmp = taken;
3650             taken = notTaken;
3651             notTaken = tmp;
3652         }
3653
3654 #if USE(JSVALUE64)
3655         branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
3656 #else
3657         GPRReg payloadGPR = op1.payloadGPR();
3658         GPRReg tagGPR = op1.tagGPR();
3659         if (condition == MacroAssembler::Equal) {
3660             // Drop down if not equal, go elsewhere if equal.
3661             MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
3662             branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
3663             notEqual.link(&m_jit);
3664         } else {
3665             // Drop down if equal, go elsewhere if not equal.
3666             branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
3667             branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
3668         }
3669 #endif
3670         
3671         jump(notTaken);
3672         
3673         use(node->child1());
3674         use(node->child2());
3675         m_indexInBlock = branchIndexInBlock;
3676         m_currentNode = branchNode;
3677         return true;
3678     }
3679     
3680     GPRTemporary result(this);
3681     
3682 #if USE(JSVALUE64)
3683     GPRReg op1GPR = op1.gpr();
3684     GPRReg resultGPR = result.gpr();
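     // ValueFalse and ValueTrue differ only in their low bit, so the result starts as the
     // encoded false and is flipped to the encoded true below when the operand matches the
     // constant.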
3685     m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
3686     MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
3687     m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
3688     notEqual.link(&m_jit);
3689     jsValueResult(resultGPR, node, DataFormatJSBoolean);
3690 #else
3691     GPRReg op1PayloadGPR = op1.payloadGPR();
3692     GPRReg op1TagGPR = op1.tagGPR();
3693     GPRReg resultGPR = result.gpr();
3694     m_jit.move(TrustedImm32(0), resultGPR);
3695     MacroAssembler::JumpList notEqual;
3696     notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
3697     notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
3698     m_jit.move(TrustedImm32(1), resultGPR);
3699     notEqual.link(&m_jit);
3700     booleanResult(resultGPR, node);
3701 #endif
3702     
3703     return false;
3704 }
3705
3706 bool SpeculativeJIT::compileStrictEq(Node* node)
3707 {
3708     switch (node->binaryUseKind()) {
3709     case BooleanUse: {
3710         unsigned branchIndexInBlock = detectPeepHoleBranch();
3711         if (branchIndexInBlock != UINT_MAX) {
3712             Node* branchNode = m_block->at(branchIndexInBlock);
3713             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3714             use(node->child1());
3715             use(node->child2());
3716             m_indexInBlock = branchIndexInBlock;
3717             m_currentNode = branchNode;
3718             return true;
3719         }
3720         compileBooleanCompare(node, MacroAssembler::Equal);
3721         return false;
3722     }
3723
3724     case Int32Use: {
3725         unsigned branchIndexInBlock = detectPeepHoleBranch();
3726         if (branchIndexInBlock != UINT_MAX) {
3727             Node* branchNode = m_block->at(branchIndexInBlock);
3728             compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
3729             use(node->child1());
3730             use(node->child2());
3731             m_indexInBlock = branchIndexInBlock;
3732             m_currentNode = branchNode;
3733             return true;
3734         }
3735         compileInt32Compare(node, MacroAssembler::Equal);
3736         return false;
3737     }
3738     
3739 #if USE(JSVALUE64)   
3740     case MachineIntUse: {
3741         unsigned branchIndexInBlock = detectPeepHoleBranch();
3742         if (branchIndexInBlock != UINT_MAX) {
3743             Node* branchNode = m_block->at(branchIndexInBlock);
3744             compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
3745             use(node->child1());
3746             use(node->child2());
3747             m_indexInBlock = branchIndexInBlock;
3748             m_currentNode = branchNode;
3749             return true;
3750         }
3751         compileInt52Compare(node, MacroAssembler::Equal);
3752         return false;
3753     }
3754 #endif // USE(JSVALUE64)
3755         
3756     case NumberUse: {
3757         unsigned branchIndexInBlock = detectPeepHoleBranch();
3758         if (branchIndexInBlock != UINT_MAX) {
3759             Node* branchNode = m_block->at(branchIndexInBlock);
3760             compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
3761             use(node->child1());
3762             use(node->child2());
3763             m_indexInBlock = branchIndexInBlock;
3764             m_currentNode = branchNode;
3765             return true;
3766         }
3767         compileDoubleCompare(node, MacroAssembler::DoubleEqual);
3768         return false;
3769     }
3770         
3771     case StringUse: {
3772         compileStringEquality(node);
3773         return false;
3774     }
3775         
3776     case StringIdentUse: {
3777         compileStringIdentEquality(node);
3778         return false;
3779     }
3780         
3781     case ObjectUse: {
3782         unsigned branchIndexInBlock = detectPeepHoleBranch();
3783         if (branchIndexInBlock != UINT_MAX) {
3784             Node* branchNode = m_block->at(branchIndexInBlock);
3785             compilePeepHoleObjectEquality(node, branchNode);
3786             use(node->child1());
3787             use(node->child2());
3788             m_indexInBlock = branchIndexInBlock;
3789             m_currentNode = branchNode;
3790             return true;