Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (WebKit, commit 617acaa67eec30a824f6f318bb1b423a71adc1b4)
1 /*
2  * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "JSCJSValueInlines.h"
39 #include "LinkBuffer.h"
40 #include <wtf/MathExtras.h>
41
42 namespace JSC { namespace DFG {
43
44 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
45     : m_compileOkay(true)
46     , m_jit(jit)
47     , m_currentNode(0)
48     , m_indexInBlock(0)
49     , m_generationInfo(m_jit.graph().frameRegisterCount())
50     , m_state(m_jit.graph())
51     , m_interpreter(m_jit.graph(), m_state)
52     , m_stream(&jit.jitCode()->variableEventStream)
53     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
54     , m_isCheckingArgumentTypes(false)
55 {
56 }
57
58 SpeculativeJIT::~SpeculativeJIT()
59 {
60 }
61
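// Inline-allocates both the butterfly (vectorLength slots plus the indexing
// header) and the JSArray cell, then writes the public length and vector
// length. For double arrays, the unused tail of the vector is filled with
// QNaN so it never holds stale bits. If either allocation fails, the slow
// path calls operationNewArrayWithSize and reloads the storage pointer.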
62 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
63 {
64     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
65     
66     GPRTemporary scratch(this);
67     GPRTemporary scratch2(this);
68     GPRReg scratchGPR = scratch.gpr();
69     GPRReg scratch2GPR = scratch2.gpr();
70     
71     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
72     
73     JITCompiler::JumpList slowCases;
74     
75     slowCases.append(
76         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
77     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
78     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
79     
80     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
81     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
82     
83     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
84 #if USE(JSVALUE64)
85         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
86         for (unsigned i = numElements; i < vectorLength; ++i)
87             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
88 #else
89         EncodedValueDescriptor value;
90         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
91         for (unsigned i = numElements; i < vectorLength; ++i) {
92             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
93             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
94         }
95 #endif
96     }
97     
98     // I want a slow path that also loads out the storage pointer, and that's
99     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
100     // of work for a very small piece of functionality. :-/
101     addSlowPathGenerator(adoptPtr(
102         new CallArrayAllocatorSlowPathGenerator(
103             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
104             structure, numElements)));
105 }
106
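// The speculationCheck() family records an OSR exit that is taken when the
// given jump (or jump list) fires. The backward* variants just append the
// exit; the non-prefixed variants additionally convert the last exit to a
// forward exit when the current speculation direction is ForwardSpeculation,
// and the forward* variants below do that conversion unconditionally, with an
// explicit ValueRecovery.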
107 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
108 {
109     if (!m_compileOkay)
110         return;
111     ASSERT(m_isCheckingArgumentTypes || m_canExit);
112     m_jit.appendExitInfo(jumpToFail);
113     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
114 }
115
116 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
117 {
118     if (!m_compileOkay)
119         return;
120     ASSERT(m_isCheckingArgumentTypes || m_canExit);
121     m_jit.appendExitInfo(jumpsToFail);
122     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
123 }
124
125 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
126 {
127     if (!m_compileOkay)
128         return;
129     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
130     if (m_speculationDirection == ForwardSpeculation)
131         convertLastOSRExitToForward();
132 }
133
134 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
135 {
136     ASSERT(m_isCheckingArgumentTypes || m_canExit);
137     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
138 }
139
140 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
141 {
142     if (!m_compileOkay)
143         return OSRExitJumpPlaceholder();
144     ASSERT(m_isCheckingArgumentTypes || m_canExit);
145     unsigned index = m_jit.jitCode()->osrExit.size();
146     m_jit.appendExitInfo();
147     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
148     return OSRExitJumpPlaceholder(index);
149 }
150
151 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
152 {
153     ASSERT(m_isCheckingArgumentTypes || m_canExit);
154     return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
155 }
156
157 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
158 {
159     if (!m_compileOkay)
160         return;
161     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
162     if (m_speculationDirection == ForwardSpeculation)
163         convertLastOSRExitToForward();
164 }
165
166 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
167 {
168     ASSERT(m_isCheckingArgumentTypes || m_canExit);
169     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
170 }
171
172 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
173 {
174     if (!m_compileOkay)
175         return;
176     ASSERT(m_isCheckingArgumentTypes || m_canExit);
177     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
178     m_jit.appendExitInfo(jumpToFail);
179     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
180 }
181
182 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
183 {
184     ASSERT(m_isCheckingArgumentTypes || m_canExit);
185     backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
186 }
187
188 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
189 {
190     if (!m_compileOkay)
191         return;
192     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
193     if (m_speculationDirection == ForwardSpeculation)
194         convertLastOSRExitToForward();
195 }
196
197 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
198 {
199     speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
200 }
201
202 void SpeculativeJIT::emitInvalidationPoint(Node* node)
203 {
204     if (!m_compileOkay)
205         return;
206     ASSERT(m_canExit);
207     ASSERT(m_speculationDirection == BackwardSpeculation);
208     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
209     m_jit.jitCode()->appendOSRExit(OSRExit(
210         UncountableInvalidation, JSValueSource(),
211         m_jit.graph().methodOfGettingAValueProfileFor(node),
212         this, m_stream->size()));
213     info.m_replacementSource = m_jit.watchpointLabel();
214     ASSERT(info.m_replacementSource.isSet());
215     noResult(node);
216 }
217
218 void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
219 {
220     m_jit.jitCode()->lastOSRExit().convertToForward(
221         m_block, m_currentNode, m_indexInBlock, valueRecovery);
222 }
223
224 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
225 {
226     ASSERT(m_isCheckingArgumentTypes || m_canExit);
227     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
228     convertLastOSRExitToForward(valueRecovery);
229 }
230
231 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
232 {
233     ASSERT(m_isCheckingArgumentTypes || m_canExit);
234     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
235     convertLastOSRExitToForward(valueRecovery);
236 }
237
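// Unconditionally OSR-exits at this point and clears m_compileOkay, which
// effectively stops code generation for the rest of the block.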
238 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
239 {
240     ASSERT(m_isCheckingArgumentTypes || m_canExit);
241     if (!m_compileOkay)
242         return;
243     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
244     m_compileOkay = false;
245 }
246
247 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
248 {
249     ASSERT(m_isCheckingArgumentTypes || m_canExit);
250     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
251 }
252
253 void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
254 {
255     ASSERT(needsTypeCheck(edge, typesPassedThrough));
256     m_interpreter.filter(edge, typesPassedThrough);
257     backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
258 }
259
260 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
261 {
262     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
263     if (m_speculationDirection == ForwardSpeculation)
264         convertLastOSRExitToForward();
265 }
266
267 void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
268 {
269     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
270     convertLastOSRExitToForward(valueRecovery);
271 }
272
273 RegisterSet SpeculativeJIT::usedRegisters()
274 {
275     RegisterSet result;
276     
277     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
278         GPRReg gpr = GPRInfo::toRegister(i);
279         if (m_gprs.isInUse(gpr))
280             result.set(gpr);
281     }
282     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
283         FPRReg fpr = FPRInfo::toRegister(i);
284         if (m_fprs.isInUse(fpr))
285             result.set(fpr);
286     }
287     
288     result.merge(RegisterSet::specialRegisters());
289     
290     return result;
291 }
292
293 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
294 {
295     m_slowPathGenerators.append(slowPathGenerator);
296 }
297
298 void SpeculativeJIT::runSlowPathGenerators()
299 {
300     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
301         m_slowPathGenerators[i]->generate(this);
302 }
303
304 // On Windows we need to wrap fmod; on other platforms we can call it directly.
305 // On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
306 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
307 static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
308 {
309     return fmod(x, y);
310 }
311 #else
312 #define fmodAsDFGOperation fmod
313 #endif
314
315 void SpeculativeJIT::clearGenerationInfo()
316 {
317     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
318         m_generationInfo[i] = GenerationInfo();
319     m_gprs = RegisterBank<GPRInfo>();
320     m_fprs = RegisterBank<FPRInfo>();
321 }
322
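// Builds a SilentRegisterSavePlan describing how to spill (only if needed)
// and later refill the GPR backing the given virtual register, so that the
// register can be preserved across a call without disturbing the register
// allocation state. silentSpill() and silentFill() below execute these plans.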
323 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
324 {
325     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
326     Node* node = info.node();
327     DataFormat registerFormat = info.registerFormat();
328     ASSERT(registerFormat != DataFormatNone);
329     ASSERT(registerFormat != DataFormatDouble);
330         
331     SilentSpillAction spillAction;
332     SilentFillAction fillAction;
333         
334     if (!info.needsSpill())
335         spillAction = DoNothingForSpill;
336     else {
337 #if USE(JSVALUE64)
338         ASSERT(info.gpr() == source);
339         if (registerFormat == DataFormatInt32)
340             spillAction = Store32Payload;
341         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
342             spillAction = StorePtr;
343         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
344             spillAction = Store64;
345         else {
346             ASSERT(registerFormat & DataFormatJS);
347             spillAction = Store64;
348         }
349 #elif USE(JSVALUE32_64)
350         if (registerFormat & DataFormatJS) {
351             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
352             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
353         } else {
354             ASSERT(info.gpr() == source);
355             spillAction = Store32Payload;
356         }
357 #endif
358     }
359         
360     if (registerFormat == DataFormatInt32) {
361         ASSERT(info.gpr() == source);
362         ASSERT(isJSInt32(info.registerFormat()));
363         if (node->hasConstant()) {
364             ASSERT(isInt32Constant(node));
365             fillAction = SetInt32Constant;
366         } else
367             fillAction = Load32Payload;
368     } else if (registerFormat == DataFormatBoolean) {
369 #if USE(JSVALUE64)
370         RELEASE_ASSERT_NOT_REACHED();
371         fillAction = DoNothingForFill;
372 #elif USE(JSVALUE32_64)
373         ASSERT(info.gpr() == source);
374         if (node->hasConstant()) {
375             ASSERT(isBooleanConstant(node));
376             fillAction = SetBooleanConstant;
377         } else
378             fillAction = Load32Payload;
379 #endif
380     } else if (registerFormat == DataFormatCell) {
381         ASSERT(info.gpr() == source);
382         if (node->hasConstant()) {
383             JSValue value = valueOfJSConstant(node);
384             ASSERT_UNUSED(value, value.isCell());
385             fillAction = SetCellConstant;
386         } else {
387 #if USE(JSVALUE64)
388             fillAction = LoadPtr;
389 #else
390             fillAction = Load32Payload;
391 #endif
392         }
393     } else if (registerFormat == DataFormatStorage) {
394         ASSERT(info.gpr() == source);
395         fillAction = LoadPtr;
396     } else if (registerFormat == DataFormatInt52) {
397         if (node->hasConstant())
398             fillAction = SetInt52Constant;
399         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
400             fillAction = Load32PayloadConvertToInt52;
401         else if (info.spillFormat() == DataFormatInt52)
402             fillAction = Load64;
403         else if (info.spillFormat() == DataFormatStrictInt52)
404             fillAction = Load64ShiftInt52Left;
405         else if (info.spillFormat() == DataFormatNone)
406             fillAction = Load64;
407         else {
408             // Should never happen. Anything that qualifies as an int32 will never
409             // be turned into a cell (immediate spec fail) or a double (to-double
410             // conversions involve a separate node).
411             RELEASE_ASSERT_NOT_REACHED();
412             fillAction = Load64; // Make GCC happy.
413         }
414     } else if (registerFormat == DataFormatStrictInt52) {
415         if (node->hasConstant())
416             fillAction = SetStrictInt52Constant;
417         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
418             fillAction = Load32PayloadSignExtend;
419         else if (info.spillFormat() == DataFormatInt52)
420             fillAction = Load64ShiftInt52Right;
421         else if (info.spillFormat() == DataFormatStrictInt52)
422             fillAction = Load64;
423         else if (info.spillFormat() == DataFormatNone)
424             fillAction = Load64;
425         else {
426             // Should never happen. Anything that qualifies as an int32 will never
427             // be turned into a cell (immediate spec fail) or a double (to-double
428             // conversions involve a separate node).
429             RELEASE_ASSERT_NOT_REACHED();
430             fillAction = Load64; // Make GCC happy.
431         }
432     } else {
433         ASSERT(registerFormat & DataFormatJS);
434 #if USE(JSVALUE64)
435         ASSERT(info.gpr() == source);
436         if (node->hasConstant()) {
437             if (valueOfJSConstant(node).isCell())
438                 fillAction = SetTrustedJSConstant;
439             else fillAction = SetJSConstant;
440         } else if (info.spillFormat() == DataFormatInt32) {
441             ASSERT(registerFormat == DataFormatJSInt32);
442             fillAction = Load32PayloadBoxInt;
443         } else if (info.spillFormat() == DataFormatDouble) {
444             ASSERT(registerFormat == DataFormatJSDouble);
445             fillAction = LoadDoubleBoxDouble;
446         } else
447             fillAction = Load64;
448 #else
449         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
450         if (node->hasConstant())
451             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
452         else if (info.payloadGPR() == source)
453             fillAction = Load32Payload;
454         else { // Fill the Tag
455             switch (info.spillFormat()) {
456             case DataFormatInt32:
457                 ASSERT(registerFormat == DataFormatJSInt32);
458                 fillAction = SetInt32Tag;
459                 break;
460             case DataFormatCell:
461                 ASSERT(registerFormat == DataFormatJSCell);
462                 fillAction = SetCellTag;
463                 break;
464             case DataFormatBoolean:
465                 ASSERT(registerFormat == DataFormatJSBoolean);
466                 fillAction = SetBooleanTag;
467                 break;
468             default:
469                 fillAction = Load32Tag;
470                 break;
471             }
472         }
473 #endif
474     }
475         
476     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
477 }
478     
479 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
480 {
481     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
482     Node* node = info.node();
483     ASSERT(info.registerFormat() == DataFormatDouble);
484
485     SilentSpillAction spillAction;
486     SilentFillAction fillAction;
487         
488     if (!info.needsSpill())
489         spillAction = DoNothingForSpill;
490     else {
491         ASSERT(!node->hasConstant());
492         ASSERT(info.spillFormat() == DataFormatNone);
493         ASSERT(info.fpr() == source);
494         spillAction = StoreDouble;
495     }
496         
497 #if USE(JSVALUE64)
498     if (node->hasConstant()) {
499         ASSERT(isNumberConstant(node));
500         fillAction = SetDoubleConstant;
501     } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
502         // It was already spilled, and not as a double, which means we need to unbox it.
503         ASSERT(info.spillFormat() & DataFormatJS);
504         fillAction = LoadJSUnboxDouble;
505     } else
506         fillAction = LoadDouble;
507 #elif USE(JSVALUE32_64)
508     ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
509     if (node->hasConstant()) {
510         ASSERT(isNumberConstant(node));
511         fillAction = SetDoubleConstant;
512     } else
513         fillAction = LoadDouble;
514 #endif
515
516     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
517 }
518     
519 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
520 {
521     switch (plan.spillAction()) {
522     case DoNothingForSpill:
523         break;
524     case Store32Tag:
525         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
526         break;
527     case Store32Payload:
528         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
529         break;
530     case StorePtr:
531         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
532         break;
533 #if USE(JSVALUE64)
534     case Store64:
535         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
536         break;
537 #endif
538     case StoreDouble:
539         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
540         break;
541     default:
542         RELEASE_ASSERT_NOT_REACHED();
543     }
544 }
545     
546 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
547 {
548 #if USE(JSVALUE32_64)
549     UNUSED_PARAM(canTrample);
550 #endif
551     switch (plan.fillAction()) {
552     case DoNothingForFill:
553         break;
554     case SetInt32Constant:
555         m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
556         break;
557 #if USE(JSVALUE64)
558     case SetInt52Constant:
559         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
560         break;
561     case SetStrictInt52Constant:
562         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
563         break;
564 #endif // USE(JSVALUE64)
565     case SetBooleanConstant:
566         m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
567         break;
568     case SetCellConstant:
569         m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
570         break;
571 #if USE(JSVALUE64)
572     case SetTrustedJSConstant:
573         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
574         break;
575     case SetJSConstant:
576         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
577         break;
578     case SetDoubleConstant:
579         m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
580         m_jit.move64ToDouble(canTrample, plan.fpr());
581         break;
582     case Load32PayloadBoxInt:
583         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
584         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
585         break;
586     case Load32PayloadConvertToInt52:
587         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
588         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
589         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
590         break;
591     case Load32PayloadSignExtend:
592         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
593         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
594         break;
595     case LoadDoubleBoxDouble:
596         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
597         m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
598         break;
599     case LoadJSUnboxDouble:
600         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
601         unboxDouble(canTrample, plan.fpr());
602         break;
603 #else
604     case SetJSConstantTag:
605         m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
606         break;
607     case SetJSConstantPayload:
608         m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
609         break;
610     case SetInt32Tag:
611         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
612         break;
613     case SetCellTag:
614         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
615         break;
616     case SetBooleanTag:
617         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
618         break;
619     case SetDoubleConstant:
620         m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
621         break;
622 #endif
623     case Load32Tag:
624         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
625         break;
626     case Load32Payload:
627         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
628         break;
629     case LoadPtr:
630         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
631         break;
632 #if USE(JSVALUE64)
633     case Load64:
634         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
635         break;
636     case Load64ShiftInt52Right:
637         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
638         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
639         break;
640     case Load64ShiftInt52Left:
641         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
642         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
643         break;
644 #endif
645     case LoadDouble:
646         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
647         break;
648     default:
649         RELEASE_ASSERT_NOT_REACHED();
650     }
651 }
652     
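// tempGPR is expected to hold the structure's indexing type byte. Returns a
// jump that is taken when that indexing type is incompatible with the array
// class and shape demanded by the ArrayMode.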
653 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
654 {
655     switch (arrayMode.arrayClass()) {
656     case Array::OriginalArray: {
657         CRASH();
658         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
659         return result;
660     }
661         
662     case Array::Array:
663         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
664         return m_jit.branch32(
665             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
666         
667     case Array::NonArray:
668     case Array::OriginalNonArray:
669         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
670         return m_jit.branch32(
671             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
672         
673     case Array::PossiblyArray:
674         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
675         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
676     }
677     
678     RELEASE_ASSERT_NOT_REACHED();
679     return JITCompiler::Jump();
680 }
681
682 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
683 {
684     JITCompiler::JumpList result;
685     
686     switch (arrayMode.type()) {
687     case Array::Int32:
688         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
689
690     case Array::Double:
691         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
692
693     case Array::Contiguous:
694         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
695
696     case Array::ArrayStorage:
697     case Array::SlowPutArrayStorage: {
698         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
699         
700         if (arrayMode.isJSArray()) {
701             if (arrayMode.isSlowPut()) {
702                 result.append(
703                     m_jit.branchTest32(
704                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
705                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
706                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
707                 result.append(
708                     m_jit.branch32(
709                         MacroAssembler::Above, tempGPR,
710                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
711                 break;
712             }
713             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
714             result.append(
715                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
716             break;
717         }
718         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
719         if (arrayMode.isSlowPut()) {
720             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
721             result.append(
722                 m_jit.branch32(
723                     MacroAssembler::Above, tempGPR,
724                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
725             break;
726         }
727         result.append(
728             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
729         break;
730     }
731     default:
732         CRASH();
733         break;
734     }
735     
736     return result;
737 }
738
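// Emits the check demanded by the node's ArrayMode: indexed modes test the
// structure's indexing type via jumpSlowForUnwantedArrayMode, while Arguments
// and typed-array modes compare the structure's ClassInfo. A mismatch
// triggers an OSR exit.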
739 void SpeculativeJIT::checkArray(Node* node)
740 {
741     ASSERT(node->arrayMode().isSpecific());
742     ASSERT(!node->arrayMode().doesConversion());
743     
744     SpeculateCellOperand base(this, node->child1());
745     GPRReg baseReg = base.gpr();
746     
747     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
748         noResult(m_currentNode);
749         return;
750     }
751     
752     const ClassInfo* expectedClassInfo = 0;
753     
754     switch (node->arrayMode().type()) {
755     case Array::String:
756         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
757         break;
758     case Array::Int32:
759     case Array::Double:
760     case Array::Contiguous:
761     case Array::ArrayStorage:
762     case Array::SlowPutArrayStorage: {
763         GPRTemporary temp(this);
764         GPRReg tempGPR = temp.gpr();
765         m_jit.loadPtr(
766             MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
767         m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
768         speculationCheck(
769             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
770             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
771         
772         noResult(m_currentNode);
773         return;
774     }
775     case Array::Arguments:
776         expectedClassInfo = Arguments::info();
777         break;
778     default:
779         expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
780         break;
781     }
782     
783     RELEASE_ASSERT(expectedClassInfo);
784     
785     GPRTemporary temp(this);
786     m_jit.loadPtr(
787         MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
788     speculationCheck(
789         BadType, JSValueSource::unboxedCell(baseReg), node,
790         m_jit.branchPtr(
791             MacroAssembler::NotEqual,
792             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
793             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
794     
795     noResult(m_currentNode);
796 }
797
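// Ensures that the base object has the indexing type required by the node's
// ArrayMode. The fast path only checks the structure (for ArrayifyToStructure)
// or the indexing type byte; otherwise an ArrayifySlowPathGenerator performs
// the conversion.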
798 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
799 {
800     ASSERT(node->arrayMode().doesConversion());
801     
802     GPRTemporary temp(this);
803     GPRTemporary structure;
804     GPRReg tempGPR = temp.gpr();
805     GPRReg structureGPR = InvalidGPRReg;
806     
807     if (node->op() != ArrayifyToStructure) {
808         GPRTemporary realStructure(this);
809         structure.adopt(realStructure);
810         structureGPR = structure.gpr();
811     }
812         
813     // We can skip all that comes next if we already have array storage.
814     MacroAssembler::JumpList slowPath;
815     
816     if (node->op() == ArrayifyToStructure) {
817         slowPath.append(m_jit.branchWeakPtr(
818             JITCompiler::NotEqual,
819             JITCompiler::Address(baseReg, JSCell::structureOffset()),
820             node->structure()));
821     } else {
822         m_jit.loadPtr(
823             MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
824         
825         m_jit.load8(
826             MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
827         
828         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
829     }
830     
831     addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
832         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
833     
834     noResult(m_currentNode);
835 }
836
837 void SpeculativeJIT::arrayify(Node* node)
838 {
839     ASSERT(node->arrayMode().isSpecific());
840     
841     SpeculateCellOperand base(this, node->child1());
842     
843     if (!node->child2()) {
844         arrayify(node, base.gpr(), InvalidGPRReg);
845         return;
846     }
847     
848     SpeculateInt32Operand property(this, node->child2());
849     
850     arrayify(node, base.gpr(), property.gpr());
851 }
852
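// Fills a GPR with the storage pointer for this edge: a value that is (or was
// spilled) in DataFormatStorage is used directly, anything else is filled as
// a cell.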
853 GPRReg SpeculativeJIT::fillStorage(Edge edge)
854 {
855     VirtualRegister virtualRegister = edge->virtualRegister();
856     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
857     
858     switch (info.registerFormat()) {
859     case DataFormatNone: {
860         if (info.spillFormat() == DataFormatStorage) {
861             GPRReg gpr = allocate();
862             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
863             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
864             info.fillStorage(*m_stream, gpr);
865             return gpr;
866         }
867         
868         // Must be a cell; fill it as a cell and then return the pointer.
869         return fillSpeculateCell(edge);
870     }
871         
872     case DataFormatStorage: {
873         GPRReg gpr = info.gpr();
874         m_gprs.lock(gpr);
875         return gpr;
876     }
877         
878     default:
879         return fillSpeculateCell(edge);
880     }
881 }
882
883 void SpeculativeJIT::useChildren(Node* node)
884 {
885     if (node->flags() & NodeHasVarArgs) {
886         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
887             if (!!m_jit.graph().m_varArgChildren[childIdx])
888                 use(m_jit.graph().m_varArgChildren[childIdx]);
889         }
890     } else {
891         Edge child1 = node->child1();
892         if (!child1) {
893             ASSERT(!node->child2() && !node->child3());
894             return;
895         }
896         use(child1);
897         
898         Edge child2 = node->child2();
899         if (!child2) {
900             ASSERT(!node->child3());
901             return;
902         }
903         use(child2);
904         
905         Edge child3 = node->child3();
906         if (!child3)
907             return;
908         use(child3);
909     }
910 }
911
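// In this configuration the write barriers are essentially no-ops: they only
// bump the JIT's write-barrier counters when WRITE_BARRIER_PROFILING is
// enabled, and they skip even that when the value is statically known not to
// be a cell (or, for the constant-cell variant, is already marked).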
912 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
913 {
914     UNUSED_PARAM(ownerGPR);
915     UNUSED_PARAM(valueGPR);
916     UNUSED_PARAM(scratch1);
917     UNUSED_PARAM(scratch2);
918     UNUSED_PARAM(useKind);
919
920     if (isKnownNotCell(valueUse.node()))
921         return;
922
923 #if ENABLE(WRITE_BARRIER_PROFILING)
924     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
925 #endif
926 }
927
928 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
929 {
930     UNUSED_PARAM(ownerGPR);
931     UNUSED_PARAM(value);
932     UNUSED_PARAM(scratch1);
933     UNUSED_PARAM(scratch2);
934     UNUSED_PARAM(useKind);
935     
936     if (Heap::isMarked(value))
937         return;
938
939 #if ENABLE(WRITE_BARRIER_PROFILING)
940     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
941 #endif
942 }
943
944 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
945 {
946     UNUSED_PARAM(owner);
947     UNUSED_PARAM(valueGPR);
948     UNUSED_PARAM(scratch);
949     UNUSED_PARAM(useKind);
950
951     if (isKnownNotCell(valueUse.node()))
952         return;
953
954 #if ENABLE(WRITE_BARRIER_PROFILING)
955     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
956 #endif
957 }
958
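// If the property is a constant string backed by an identifier, this emits a
// patchable jump and a StructureStubInfo so the 'in' check can later be
// patched in place, with operationInOptimize as the slow path. Otherwise it
// flushes registers and calls operationGenericIn.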
959 void SpeculativeJIT::compileIn(Node* node)
960 {
961     SpeculateCellOperand base(this, node->child2());
962     GPRReg baseGPR = base.gpr();
963         
964     if (isConstant(node->child1().node())) {
965         JSString* string =
966             jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
967         if (string && string->tryGetValueImpl()
968             && string->tryGetValueImpl()->isIdentifier()) {
969             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
970             
971             GPRTemporary result(this);
972             GPRReg resultGPR = result.gpr();
973
974             use(node->child1());
975                 
976             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
977             
978             OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
979                 jump.m_jump, this, operationInOptimize,
980                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
981                 string->tryGetValueImpl());
982             
983             stubInfo->codeOrigin = node->codeOrigin;
984             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
985             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
986             stubInfo->patch.usedRegisters = usedRegisters();
987             stubInfo->patch.registersFlushed = false;
988             
989             m_jit.addIn(InRecord(jump, slowPath.get(), stubInfo));
990             addSlowPathGenerator(slowPath.release());
991                 
992             base.use();
993                 
994 #if USE(JSVALUE64)
995             jsValueResult(
996                 resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
997 #else
998             booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
999 #endif
1000             return;
1001         }
1002     }
1003         
1004     JSValueOperand key(this, node->child1());
1005     JSValueRegs regs = key.jsValueRegs();
1006         
1007     GPRResult result(this);
1008     GPRReg resultGPR = result.gpr();
1009         
1010     base.use();
1011     key.use();
1012         
1013     flushRegisters();
1014     callOperation(
1015         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1016         baseGPR, regs);
1017 #if USE(JSVALUE64)
1018     jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
1019 #else
1020     booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1021 #endif
1022 }
1023
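// If the compare is immediately followed by a Branch that consumes it, the
// two are fused into a compare-and-branch and this returns true so the caller
// can skip the branch node; otherwise a standalone compare is emitted and
// this returns false. nonSpeculativeStrictEq() below follows the same pattern.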
1024 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1025 {
1026     unsigned branchIndexInBlock = detectPeepHoleBranch();
1027     if (branchIndexInBlock != UINT_MAX) {
1028         Node* branchNode = m_block->at(branchIndexInBlock);
1029
1030         ASSERT(node->adjustedRefCount() == 1);
1031         
1032         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1033     
1034         m_indexInBlock = branchIndexInBlock;
1035         m_currentNode = branchNode;
1036         
1037         return true;
1038     }
1039     
1040     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1041     
1042     return false;
1043 }
1044
1045 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1046 {
1047     unsigned branchIndexInBlock = detectPeepHoleBranch();
1048     if (branchIndexInBlock != UINT_MAX) {
1049         Node* branchNode = m_block->at(branchIndexInBlock);
1050
1051         ASSERT(node->adjustedRefCount() == 1);
1052         
1053         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1054     
1055         m_indexInBlock = branchIndexInBlock;
1056         m_currentNode = branchNode;
1057         
1058         return true;
1059     }
1060     
1061     nonSpeculativeNonPeepholeStrictEq(node, invert);
1062     
1063     return false;
1064 }
1065
1066 static const char* dataFormatString(DataFormat format)
1067 {
1068     // These values correspond to the DataFormat enum.
1069     const char* strings[] = {
1070         "[  ]",
1071         "[ i]",
1072         "[ d]",
1073         "[ c]",
1074         "Err!",
1075         "Err!",
1076         "Err!",
1077         "Err!",
1078         "[J ]",
1079         "[Ji]",
1080         "[Jd]",
1081         "[Jc]",
1082         "Err!",
1083         "Err!",
1084         "Err!",
1085         "Err!",
1086     };
1087     return strings[format];
1088 }
1089
1090 void SpeculativeJIT::dump(const char* label)
1091 {
1092     if (label)
1093         dataLogF("<%s>\n", label);
1094
1095     dataLogF("  gprs:\n");
1096     m_gprs.dump();
1097     dataLogF("  fprs:\n");
1098     m_fprs.dump();
1099     dataLogF("  VirtualRegisters:\n");
1100     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1101         GenerationInfo& info = m_generationInfo[i];
1102         if (info.alive())
1103             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1104         else
1105             dataLogF("    % 3d:[__][__]", i);
1106         if (info.registerFormat() == DataFormatDouble)
1107             dataLogF(":fpr%d\n", info.fpr());
1108         else if (info.registerFormat() != DataFormatNone
1109 #if USE(JSVALUE32_64)
1110             && !(info.registerFormat() & DataFormatJS)
1111 #endif
1112             ) {
1113             ASSERT(info.gpr() != InvalidGPRReg);
1114             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1115         } else
1116             dataLogF("\n");
1117     }
1118     if (label)
1119         dataLogF("</%s>\n", label);
1120 }
1121
1122 GPRTemporary::GPRTemporary()
1123     : m_jit(0)
1124     , m_gpr(InvalidGPRReg)
1125 {
1126 }
1127
1128 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1129     : m_jit(jit)
1130     , m_gpr(InvalidGPRReg)
1131 {
1132     m_gpr = m_jit->allocate();
1133 }
1134
1135 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1136     : m_jit(jit)
1137     , m_gpr(InvalidGPRReg)
1138 {
1139     m_gpr = m_jit->allocate(specific);
1140 }
1141
1142 #if USE(JSVALUE32_64)
1143 GPRTemporary::GPRTemporary(
1144     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1145     : m_jit(jit)
1146     , m_gpr(InvalidGPRReg)
1147 {
1148     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1149         m_gpr = m_jit->reuse(op1.gpr(which));
1150     else
1151         m_gpr = m_jit->allocate();
1152 }
1153 #endif // USE(JSVALUE32_64)
1154
1155 void GPRTemporary::adopt(GPRTemporary& other)
1156 {
1157     ASSERT(!m_jit);
1158     ASSERT(m_gpr == InvalidGPRReg);
1159     ASSERT(other.m_jit);
1160     ASSERT(other.m_gpr != InvalidGPRReg);
1161     m_jit = other.m_jit;
1162     m_gpr = other.m_gpr;
1163     other.m_jit = 0;
1164     other.m_gpr = InvalidGPRReg;
1165 }
1166
1167 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1168     : m_jit(jit)
1169     , m_fpr(InvalidFPRReg)
1170 {
1171     m_fpr = m_jit->fprAllocate();
1172 }
1173
1174 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1175     : m_jit(jit)
1176     , m_fpr(InvalidFPRReg)
1177 {
1178     if (m_jit->canReuse(op1.node()))
1179         m_fpr = m_jit->reuse(op1.fpr());
1180     else
1181         m_fpr = m_jit->fprAllocate();
1182 }
1183
1184 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1185     : m_jit(jit)
1186     , m_fpr(InvalidFPRReg)
1187 {
1188     if (m_jit->canReuse(op1.node()))
1189         m_fpr = m_jit->reuse(op1.fpr());
1190     else if (m_jit->canReuse(op2.node()))
1191         m_fpr = m_jit->reuse(op2.fpr());
1192     else
1193         m_fpr = m_jit->fprAllocate();
1194 }
1195
1196 #if USE(JSVALUE32_64)
1197 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1198     : m_jit(jit)
1199     , m_fpr(InvalidFPRReg)
1200 {
1201     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1202         m_fpr = m_jit->reuse(op1.fpr());
1203     else
1204         m_fpr = m_jit->fprAllocate();
1205 }
1206 #endif
1207
1208 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1209 {
1210     BasicBlock* taken = branchNode->takenBlock();
1211     BasicBlock* notTaken = branchNode->notTakenBlock();
1212     
1213     SpeculateDoubleOperand op1(this, node->child1());
1214     SpeculateDoubleOperand op2(this, node->child2());
1215     
1216     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1217     jump(notTaken);
1218 }
1219
1220 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1221 {
1222     BasicBlock* taken = branchNode->takenBlock();
1223     BasicBlock* notTaken = branchNode->notTakenBlock();
1224
1225     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1226     
1227     if (taken == nextBlock()) {
1228         condition = MacroAssembler::NotEqual;
1229         BasicBlock* tmp = taken;
1230         taken = notTaken;
1231         notTaken = tmp;
1232     }
1233
1234     SpeculateCellOperand op1(this, node->child1());
1235     SpeculateCellOperand op2(this, node->child2());
1236     
1237     GPRReg op1GPR = op1.gpr();
1238     GPRReg op2GPR = op2.gpr();
1239     
1240     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1241         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1242             speculationCheck(
1243                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
1244                 m_jit.branchPtr(
1245                     MacroAssembler::Equal, 
1246                     MacroAssembler::Address(op1GPR, JSCell::structureOffset()), 
1247                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1248         }
1249         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1250             speculationCheck(
1251                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1252                 m_jit.branchPtr(
1253                     MacroAssembler::Equal, 
1254                     MacroAssembler::Address(op2GPR, JSCell::structureOffset()), 
1255                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1256         }
1257     } else {
1258         GPRTemporary structure(this);
1259         GPRReg structureGPR = structure.gpr();
1260
1261         m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1262         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1263             speculationCheck(
1264                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1265                 m_jit.branchPtr(
1266                     MacroAssembler::Equal, 
1267                     structureGPR, 
1268                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1269         }
1270         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1271             m_jit.branchTest8(
1272                 MacroAssembler::NonZero, 
1273                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1274                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1275
1276         m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1277         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1278             speculationCheck(
1279                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1280                 m_jit.branchPtr(
1281                     MacroAssembler::Equal, 
1282                     structureGPR, 
1283                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1284         }
1285         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1286             m_jit.branchTest8(
1287                 MacroAssembler::NonZero, 
1288                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1289                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1290     }
1291
1292     branchPtr(condition, op1GPR, op2GPR, taken);
1293     jump(notTaken);
1294 }
1295
1296 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1297 {
1298     BasicBlock* taken = branchNode->takenBlock();
1299     BasicBlock* notTaken = branchNode->notTakenBlock();
1300
1301     // The branch instruction will branch to the taken block.
1302     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1303     if (taken == nextBlock()) {
1304         condition = JITCompiler::invert(condition);
1305         BasicBlock* tmp = taken;
1306         taken = notTaken;
1307         notTaken = tmp;
1308     }
1309
1310     if (isBooleanConstant(node->child1().node())) {
1311         bool imm = valueOfBooleanConstant(node->child1().node());
1312         SpeculateBooleanOperand op2(this, node->child2());
1313         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1314     } else if (isBooleanConstant(node->child2().node())) {
1315         SpeculateBooleanOperand op1(this, node->child1());
1316         bool imm = valueOfBooleanConstant(node->child2().node());
1317         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1318     } else {
1319         SpeculateBooleanOperand op1(this, node->child1());
1320         SpeculateBooleanOperand op2(this, node->child2());
1321         branch32(condition, op1.gpr(), op2.gpr(), taken);
1322     }
1323
1324     jump(notTaken);
1325 }
1326
1327 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1328 {
1329     BasicBlock* taken = branchNode->takenBlock();
1330     BasicBlock* notTaken = branchNode->notTakenBlock();
1331
1332     // The branch instruction will branch to the taken block.
1333     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1334     if (taken == nextBlock()) {
1335         condition = JITCompiler::invert(condition);
1336         BasicBlock* tmp = taken;
1337         taken = notTaken;
1338         notTaken = tmp;
1339     }
1340
1341     if (isInt32Constant(node->child1().node())) {
1342         int32_t imm = valueOfInt32Constant(node->child1().node());
1343         SpeculateInt32Operand op2(this, node->child2());
1344         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1345     } else if (isInt32Constant(node->child2().node())) {
1346         SpeculateInt32Operand op1(this, node->child1());
1347         int32_t imm = valueOfInt32Constant(node->child2().node());
1348         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1349     } else {
1350         SpeculateInt32Operand op1(this, node->child1());
1351         SpeculateInt32Operand op2(this, node->child2());
1352         branch32(condition, op1.gpr(), op2.gpr(), taken);
1353     }
1354
1355     jump(notTaken);
1356 }
1357
1358 // Returns true if the compare is fused with a subsequent branch.
1359 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1360 {
1361     // Fused compare & branch.
1362     unsigned branchIndexInBlock = detectPeepHoleBranch();
1363     if (branchIndexInBlock != UINT_MAX) {
1364         Node* branchNode = m_block->at(branchIndexInBlock);
1365
1366         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1367         // so there can be no intervening nodes to also reference the compare.
1368         ASSERT(node->adjustedRefCount() == 1);
1369
1370         if (node->isBinaryUseKind(Int32Use))
1371             compilePeepHoleInt32Branch(node, branchNode, condition);
1372 #if USE(JSVALUE64)
1373         else if (node->isBinaryUseKind(MachineIntUse))
1374             compilePeepHoleInt52Branch(node, branchNode, condition);
1375 #endif // USE(JSVALUE64)
1376         else if (node->isBinaryUseKind(NumberUse))
1377             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1378         else if (node->op() == CompareEq) {
1379             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1380                 // Use non-peephole comparison, for now.
1381                 return false;
1382             }
1383             if (node->isBinaryUseKind(BooleanUse))
1384                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1385             else if (node->isBinaryUseKind(ObjectUse))
1386                 compilePeepHoleObjectEquality(node, branchNode);
1387             else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
1388                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1389             else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
1390                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1391             else {
1392                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1393                 return true;
1394             }
1395         } else {
1396             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1397             return true;
1398         }
1399
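        // The compare's operands are consumed here, and the block cursor is advanced to
        // the branch node, so the main compile loop will not emit the fused branch again.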
1400         use(node->child1());
1401         use(node->child2());
1402         m_indexInBlock = branchIndexInBlock;
1403         m_currentNode = branchNode;
1404         return true;
1405     }
1406     return false;
1407 }
1408
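// Tell the variable event stream that this node's value now exists, so that OSR exit
// can find it when reconstructing bytecode state.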
1409 void SpeculativeJIT::noticeOSRBirth(Node* node)
1410 {
1411     if (!node->hasVirtualRegister())
1412         return;
1413     
1414     VirtualRegister virtualRegister = node->virtualRegister();
1415     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1416     
1417     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1418 }
1419
1420 void SpeculativeJIT::compileMovHint(Node* node)
1421 {
1422     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1423     
1424     Node* child = node->child1().node();
1425     noticeOSRBirth(child);
1426     
1427     if (child->op() == UInt32ToNumber)
1428         noticeOSRBirth(child->child1().node());
1429     
1430     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
1431 }
1432
1433 void SpeculativeJIT::compileMovHintAndCheck(Node* node)
1434 {
1435     compileMovHint(node);
1436     speculate(node, node->child1());
1437     noResult(node);
1438 }
1439
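// Give up on code generation for the current block: plant a breakpoint so that any stray
// jump into the abandoned code traps, drop all generation info, and reset m_compileOkay
// so that later blocks can still be compiled.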
1440 void SpeculativeJIT::bail()
1441 {
1442     m_compileOkay = true;
1443     m_jit.breakpoint();
1444     clearGenerationInfo();
1445 }
1446
1447 void SpeculativeJIT::compileCurrentBlock()
1448 {
1449     ASSERT(m_compileOkay);
1450     
1451     if (!m_block)
1452         return;
1453     
1454     ASSERT(m_block->isReachable);
1455     
1456     m_jit.blockHeads()[m_block->index] = m_jit.label();
1457
1458     if (!m_block->cfaHasVisited) {
1459         // Don't generate code for basic blocks that are unreachable according to CFA.
1460         // But to be sure that nobody has generated a jump to this block, drop in a
1461         // breakpoint here.
1462         m_jit.breakpoint();
1463         return;
1464     }
1465
1466     m_stream->appendAndLog(VariableEvent::reset());
1467     
1468     m_jit.jitAssertHasValidCallFrame();
1469
1470     for (size_t i = 0; i < m_block->variablesAtHead.numberOfArguments(); ++i) {
1471         m_stream->appendAndLog(
1472             VariableEvent::setLocal(
1473                 virtualRegisterForArgument(i), virtualRegisterForArgument(i), DataFormatJS));
1474     }
1475     
1476     m_state.reset();
1477     m_state.beginBasicBlock(m_block);
1478     
1479     for (size_t i = 0; i < m_block->variablesAtHead.numberOfLocals(); ++i) {
1480         Node* node = m_block->variablesAtHead.local(i);
1481         if (!node)
1482             continue; // No need to record dead SetLocals.
1483         
1484         VariableAccessData* variable = node->variableAccessData();
1485         DataFormat format;
1486         if (variable->isArgumentsAlias())
1487             format = DataFormatArguments;
1488         else if (!node->refCount())
1489             continue; // No need to record dead SetLocals.
1490         else
1491             format = dataFormatFor(variable->flushFormat());
1492         m_stream->appendAndLog(
1493             VariableEvent::setLocal(virtualRegisterForLocal(i), variable->machineLocal(), format));
1494     }
1495     
1496     m_codeOriginForExitTarget = CodeOrigin();
1497     m_codeOriginForExitProfile = CodeOrigin();
1498     
1499     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1500         m_currentNode = m_block->at(m_indexInBlock);
1501         
1502         // We may have hit a contradiction that the CFA was aware of but that the JIT
1503         // didn't cause directly.
1504         if (!m_state.isValid()) {
1505             bail();
1506             return;
1507         }
1508         
1509         m_canExit = m_currentNode->canExit();
1510         bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
1511         m_jit.setForNode(m_currentNode);
1512         m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
1513         m_codeOriginForExitProfile = m_currentNode->codeOrigin;
1514         if (!m_currentNode->shouldGenerate()) {
1515             switch (m_currentNode->op()) {
1516             case JSConstant:
1517                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1518                 break;
1519                 
1520             case WeakJSConstant:
1521                 m_jit.addWeakReference(m_currentNode->weakConstant());
1522                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1523                 break;
1524                 
1525             case SetLocal:
1526                 RELEASE_ASSERT_NOT_REACHED();
1527                 break;
1528                 
1529             case MovHint:
1530                 compileMovHint(m_currentNode);
1531                 break;
1532                 
1533             case ZombieHint: {
1534                 recordSetLocal(DataFormatDead);
1535                 break;
1536             }
1537
1538             default:
1539                 if (belongsInMinifiedGraph(m_currentNode->op()))
1540                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1541                 break;
1542             }
1543         } else {
1544             
1545             if (verboseCompilationEnabled()) {
1546                 dataLogF(
1547                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1548                     (int)m_currentNode->index(),
1549                     m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1550                 dataLog("\n");
1551             }
1552             
1553             m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
1554             
1555             compile(m_currentNode);
1556
1557 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1558             m_jit.clearRegisterAllocationOffsets();
1559 #endif
1560
1561             if (!m_compileOkay) {
1562                 bail();
1563                 return;
1564             }
1565             
1566             if (belongsInMinifiedGraph(m_currentNode->op())) {
1567                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1568                 noticeOSRBirth(m_currentNode);
1569             }
1570         }
1571         
1572         // Make sure that the abstract state is rematerialized for the next node.
1573         if (shouldExecuteEffects)
1574             m_interpreter.executeEffects(m_indexInBlock);
1575     }
1576     
1577     // Perform the most basic verification that children have been used correctly.
1578     if (!ASSERT_DISABLED) {
1579         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1580             GenerationInfo& info = m_generationInfo[index];
1581             RELEASE_ASSERT(!info.alive());
1582         }
1583     }
1584 }
1585
1586 // If we are making type predictions about our arguments then
1587 // we need to check that they are correct on function entry.
1588 void SpeculativeJIT::checkArgumentTypes()
1589 {
1590     ASSERT(!m_currentNode);
1591     m_isCheckingArgumentTypes = true;
1592     m_speculationDirection = BackwardSpeculation;
1593     m_codeOriginForExitTarget = CodeOrigin(0);
1594     m_codeOriginForExitProfile = CodeOrigin(0);
1595
1596     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1597         Node* node = m_jit.graph().m_arguments[i];
1598         ASSERT(node->op() == SetArgument);
1599         if (!node->shouldGenerate()) {
1600             // The argument is dead. We don't do any checks for such arguments.
1601             continue;
1602         }
1603         
1604         VariableAccessData* variableAccessData = node->variableAccessData();
1605         FlushFormat format = variableAccessData->flushFormat();
1606         
1607         if (format == FlushedJSValue)
1608             continue;
1609         
1610         VirtualRegister virtualRegister = variableAccessData->local();
1611
1612         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1613         
1614 #if USE(JSVALUE64)
1615         switch (format) {
1616         case FlushedInt32: {
1617             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1618             break;
1619         }
1620         case FlushedBoolean: {
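            // Xoring with ValueFalse maps false to 0 and true to 1; any other bits left
            // set mean the incoming value was not a boolean, so we exit.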
1621             GPRTemporary temp(this);
1622             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1623             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1624             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1625             break;
1626         }
1627         case FlushedCell: {
1628             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1629             break;
1630         }
1631         default:
1632             RELEASE_ASSERT_NOT_REACHED();
1633             break;
1634         }
1635 #else
1636         switch (format) {
1637         case FlushedInt32: {
1638             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1639             break;
1640         }
1641         case FlushedBoolean: {
1642             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1643             break;
1644         }
1645         case FlushedCell: {
1646             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1647             break;
1648         }
1649         default:
1650             RELEASE_ASSERT_NOT_REACHED();
1651             break;
1652         }
1653 #endif
1654     }
1655     m_isCheckingArgumentTypes = false;
1656 }
1657
1658 bool SpeculativeJIT::compile()
1659 {
1660     checkArgumentTypes();
1661
1662     ASSERT(!m_currentNode);
1663     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1664         m_jit.setForBlockIndex(blockIndex);
1665         m_block = m_jit.graph().block(blockIndex);
1666         compileCurrentBlock();
1667     }
1668     linkBranches();
1669     return true;
1670 }
1671
1672 void SpeculativeJIT::createOSREntries()
1673 {
1674     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1675         BasicBlock* block = m_jit.graph().block(blockIndex);
1676         if (!block)
1677             continue;
1678         if (!block->isOSRTarget)
1679             continue;
1680         
1681         // Currently we don't have OSR entry trampolines. We could add them
1682         // here if need be.
1683         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1684     }
1685 }
1686
1687 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1688 {
1689     unsigned osrEntryIndex = 0;
1690     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1691         BasicBlock* block = m_jit.graph().block(blockIndex);
1692         if (!block)
1693             continue;
1694         if (!block->isOSRTarget)
1695             continue;
1696         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1697     }
1698     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1699 }
1700
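// PutByVal into double (Array::Double) storage. The value is speculated to be a real
// number (NaN is rejected, since double storage reserves NaN to represent holes), and an
// out-of-bounds index either OSR-exits or takes the beyond-array-bounds slow path,
// depending on the ArrayMode.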
1701 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1702 {
1703     Edge child3 = m_jit.graph().varArgChild(node, 2);
1704     Edge child4 = m_jit.graph().varArgChild(node, 3);
1705
1706     ArrayMode arrayMode = node->arrayMode();
1707     
1708     GPRReg baseReg = base.gpr();
1709     GPRReg propertyReg = property.gpr();
1710     
1711     SpeculateDoubleOperand value(this, child3);
1712
1713     FPRReg valueReg = value.fpr();
1714     
1715     DFG_TYPE_CHECK(
1716         JSValueRegs(), child3, SpecFullRealNumber,
1717         m_jit.branchDouble(
1718             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1719     
1720     if (!m_compileOkay)
1721         return;
1722     
1723     StorageOperand storage(this, child4);
1724     GPRReg storageReg = storage.gpr();
1725
1726     if (node->op() == PutByValAlias) {
1727         // Store the value to the array.
1728         GPRReg propertyReg = property.gpr();
1729         FPRReg valueReg = value.fpr();
1730         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1731         
1732         noResult(m_currentNode);
1733         return;
1734     }
1735     
1736     GPRTemporary temporary;
1737     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1738
1739     MacroAssembler::Jump slowCase;
1740     
1741     if (arrayMode.isInBounds()) {
1742         speculationCheck(
1743             OutOfBounds, JSValueRegs(), 0,
1744             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1745     } else {
1746         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1747         
1748         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1749         
1750         if (!arrayMode.isOutOfBounds())
1751             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1752         
1753         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1754         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1755         
1756         inBounds.link(&m_jit);
1757     }
1758     
1759     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1760
1761     base.use();
1762     property.use();
1763     value.use();
1764     storage.use();
1765     
1766     if (arrayMode.isOutOfBounds()) {
1767         addSlowPathGenerator(
1768             slowPathCall(
1769                 slowCase, this,
1770                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1771                 NoResult, baseReg, propertyReg, valueReg));
1772     }
1773
1774     noResult(m_currentNode, UseChildrenCalledExplicitly);
1775 }
1776
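// charCodeAt()-style access: bounds-check the index against the string's length, then
// load an 8-bit or 16-bit code unit depending on the is8Bit flag of the string's
// StringImpl.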
1777 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1778 {
1779     SpeculateCellOperand string(this, node->child1());
1780     SpeculateStrictInt32Operand index(this, node->child2());
1781     StorageOperand storage(this, node->child3());
1782
1783     GPRReg stringReg = string.gpr();
1784     GPRReg indexReg = index.gpr();
1785     GPRReg storageReg = storage.gpr();
1786     
1787     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1788
1789     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1790     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1791
1792     GPRTemporary scratch(this);
1793     GPRReg scratchReg = scratch.gpr();
1794
1795     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1796
1797     // Load the character into scratchReg
1798     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1799
1800     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1801     JITCompiler::Jump cont8Bit = m_jit.jump();
1802
1803     is16Bit.link(&m_jit);
1804
1805     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1806
1807     cont8Bit.link(&m_jit);
1808
1809     int32Result(scratchReg, m_currentNode);
1810 }
1811
1812 void SpeculativeJIT::compileGetByValOnString(Node* node)
1813 {
1814     SpeculateCellOperand base(this, node->child1());
1815     SpeculateStrictInt32Operand property(this, node->child2());
1816     StorageOperand storage(this, node->child3());
1817     GPRReg baseReg = base.gpr();
1818     GPRReg propertyReg = property.gpr();
1819     GPRReg storageReg = storage.gpr();
1820
1821     GPRTemporary scratch(this);
1822     GPRReg scratchReg = scratch.gpr();
1823 #if USE(JSVALUE32_64)
1824     GPRTemporary resultTag;
1825     GPRReg resultTagReg = InvalidGPRReg;
1826     if (node->arrayMode().isOutOfBounds()) {
1827         GPRTemporary realResultTag(this);
1828         resultTag.adopt(realResultTag);
1829         resultTagReg = resultTag.gpr();
1830     }
1831 #endif
1832
1833     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1834
1835     // Unsigned comparison so we can filter out negative indices and indices that are too large.
1836     JITCompiler::Jump outOfBounds = m_jit.branch32(
1837         MacroAssembler::AboveOrEqual, propertyReg,
1838         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1839     if (node->arrayMode().isInBounds())
1840         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1841
1842     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1843
1844     // Load the character into scratchReg
1845     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1846
1847     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1848     JITCompiler::Jump cont8Bit = m_jit.jump();
1849
1850     is16Bit.link(&m_jit);
1851
1852     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1853
1854     JITCompiler::Jump bigCharacter =
1855         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1856
1857     // 8-bit string values don't need the isASCII check.
1858     cont8Bit.link(&m_jit);
1859
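    // Character codes below 0x100 are materialized from the VM's single character string
    // cache: scale the code up to a pointer-sized table index (shift by 2 on 32-bit, by 3
    // on 64-bit) and load the cached JSString*.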
1860     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1861     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1862     m_jit.loadPtr(scratchReg, scratchReg);
1863
1864     addSlowPathGenerator(
1865         slowPathCall(
1866             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1867
1868     if (node->arrayMode().isOutOfBounds()) {
1869 #if USE(JSVALUE32_64)
1870         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1871 #endif
1872
1873         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
1874         if (globalObject->stringPrototypeChainIsSane()) {
1875 #if USE(JSVALUE64)
1876             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1877                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
1878 #else
1879             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1880                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
1881                 baseReg, propertyReg)));
1882 #endif
1883         } else {
1884 #if USE(JSVALUE64)
1885             addSlowPathGenerator(
1886                 slowPathCall(
1887                     outOfBounds, this, operationGetByValStringInt,
1888                     scratchReg, baseReg, propertyReg));
1889 #else
1890             addSlowPathGenerator(
1891                 slowPathCall(
1892                     outOfBounds, this, operationGetByValStringInt,
1893                     resultTagReg, scratchReg, baseReg, propertyReg));
1894 #endif
1895         }
1896         
1897 #if USE(JSVALUE64)
1898         jsValueResult(scratchReg, m_currentNode);
1899 #else
1900         jsValueResult(resultTagReg, scratchReg, m_currentNode);
1901 #endif
1902     } else
1903         cellResult(scratchReg, m_currentNode);
1904 }
1905
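// String.fromCharCode() with an int32 argument. Character codes below 0xff are served
// from the single character string cache; larger codes, or an empty cache slot, take the
// operationStringFromCharCode slow path.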
1906 void SpeculativeJIT::compileFromCharCode(Node* node)
1907 {
1908     SpeculateStrictInt32Operand property(this, node->child1());
1909     GPRReg propertyReg = property.gpr();
1910     GPRTemporary smallStrings(this);
1911     GPRTemporary scratch(this);
1912     GPRReg scratchReg = scratch.gpr();
1913     GPRReg smallStringsReg = smallStrings.gpr();
1914
1915     JITCompiler::JumpList slowCases;
1916     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1917     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1918     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1919
1920     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1921     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1922     cellResult(scratchReg, m_currentNode);
1923 }
1924
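// Classify how the input to a ToInt32 is currently represented, so that
// compileValueToInt32() can pick the cheapest conversion: reuse an existing integer,
// truncate a double, or untag and convert a boxed JSValue.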
1925 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1926 {
1927     VirtualRegister virtualRegister = node->virtualRegister();
1928     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1929
1930     switch (info.registerFormat()) {
1931     case DataFormatStorage:
1932         RELEASE_ASSERT_NOT_REACHED();
1933
1934     case DataFormatBoolean:
1935     case DataFormatCell:
1936         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1937         return GeneratedOperandTypeUnknown;
1938
1939     case DataFormatNone:
1940     case DataFormatJSCell:
1941     case DataFormatJS:
1942     case DataFormatJSBoolean:
1943         return GeneratedOperandJSValue;
1944
1945     case DataFormatJSInt32:
1946     case DataFormatInt32:
1947         return GeneratedOperandInteger;
1948
1949     case DataFormatJSDouble:
1950     case DataFormatDouble:
1951         return GeneratedOperandDouble;
1952         
1953     default:
1954         RELEASE_ASSERT_NOT_REACHED();
1955         return GeneratedOperandTypeUnknown;
1956     }
1957 }
1958
1959 void SpeculativeJIT::compileValueToInt32(Node* node)
1960 {
1961     switch (node->child1().useKind()) {
1962     case Int32Use: {
1963         SpeculateInt32Operand op1(this, node->child1());
1964         GPRTemporary result(this, Reuse, op1);
1965         m_jit.move(op1.gpr(), result.gpr());
1966         int32Result(result.gpr(), node, op1.format());
1967         return;
1968     }
1969         
1970 #if USE(JSVALUE64)
1971     case MachineIntUse: {
1972         SpeculateStrictInt52Operand op1(this, node->child1());
1973         GPRTemporary result(this, Reuse, op1);
1974         GPRReg op1GPR = op1.gpr();
1975         GPRReg resultGPR = result.gpr();
1976         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
1977         int32Result(resultGPR, node, DataFormatInt32);
1978         return;
1979     }
1980 #endif // USE(JSVALUE64)
1981     
1982     case NumberUse:
1983     case NotCellUse: {
1984         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
1985         case GeneratedOperandInteger: {
1986             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
1987             GPRTemporary result(this, Reuse, op1);
1988             m_jit.move(op1.gpr(), result.gpr());
1989             int32Result(result.gpr(), node, op1.format());
1990             return;
1991         }
1992         case GeneratedOperandDouble: {
1993             GPRTemporary result(this);
1994             SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
1995             FPRReg fpr = op1.fpr();
1996             GPRReg gpr = result.gpr();
1997             JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
1998             
1999             addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
2000
2001             int32Result(gpr, node);
2002             return;
2003         }
2004         case GeneratedOperandJSValue: {
2005             GPRTemporary result(this);
2006 #if USE(JSVALUE64)
2007             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2008
2009             GPRReg gpr = op1.gpr();
2010             GPRReg resultGpr = result.gpr();
2011             FPRTemporary tempFpr(this);
2012             FPRReg fpr = tempFpr.fpr();
2013
2014             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2015             JITCompiler::JumpList converted;
2016
2017             if (node->child1().useKind() == NumberUse) {
2018                 DFG_TYPE_CHECK(
2019                     JSValueRegs(gpr), node->child1(), SpecFullNumber,
2020                     m_jit.branchTest64(
2021                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2022             } else {
2023                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2024                 
2025                 DFG_TYPE_CHECK(
2026                     JSValueRegs(gpr), node->child1(), ~SpecCell,
2027                     m_jit.branchTest64(
2028                         JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
2029                 
2030                 // It's not a cell, so true turns into 1 and all else turns into 0.
2031                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2032                 converted.append(m_jit.jump());
2033                 
2034                 isNumber.link(&m_jit);
2035             }
2036
2037             // First, if we get here we have a double encoded as a JSValue
2038             m_jit.move(gpr, resultGpr);
2039             unboxDouble(resultGpr, fpr);
2040
2041             silentSpillAllRegisters(resultGpr);
2042             callOperation(toInt32, resultGpr, fpr);
2043             silentFillAllRegisters(resultGpr);
2044
2045             converted.append(m_jit.jump());
2046
2047             isInteger.link(&m_jit);
2048             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2049
2050             converted.link(&m_jit);
2051 #else
2052             Node* childNode = node->child1().node();
2053             VirtualRegister virtualRegister = childNode->virtualRegister();
2054             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2055
2056             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2057
2058             GPRReg payloadGPR = op1.payloadGPR();
2059             GPRReg resultGpr = result.gpr();
2060         
2061             JITCompiler::JumpList converted;
2062
2063             if (info.registerFormat() == DataFormatJSInt32)
2064                 m_jit.move(payloadGPR, resultGpr);
2065             else {
2066                 GPRReg tagGPR = op1.tagGPR();
2067                 FPRTemporary tempFpr(this);
2068                 FPRReg fpr = tempFpr.fpr();
2069                 FPRTemporary scratch(this);
2070
2071                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2072
2073                 if (node->child1().useKind() == NumberUse) {
2074                     DFG_TYPE_CHECK(
2075                         JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber,
2076                         m_jit.branch32(
2077                             MacroAssembler::AboveOrEqual, tagGPR,
2078                             TrustedImm32(JSValue::LowestTag)));
2079                 } else {
2080                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2081                     
2082                     DFG_TYPE_CHECK(
2083                         JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
2084                         m_jit.branch32(
2085                             JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
2086                     
2087                     // It's not a cell, so true turns into 1 and all else turns into 0.
2088                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2089                     m_jit.move(TrustedImm32(0), resultGpr);
2090                     converted.append(m_jit.jump());
2091                     
2092                     isBoolean.link(&m_jit);
2093                     m_jit.move(payloadGPR, resultGpr);
2094                     converted.append(m_jit.jump());
2095                     
2096                     isNumber.link(&m_jit);
2097                 }
2098
2099                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2100
2101                 silentSpillAllRegisters(resultGpr);
2102                 callOperation(toInt32, resultGpr, fpr);
2103                 silentFillAllRegisters(resultGpr);
2104
2105                 converted.append(m_jit.jump());
2106
2107                 isInteger.link(&m_jit);
2108                 m_jit.move(payloadGPR, resultGpr);
2109
2110                 converted.link(&m_jit);
2111             }
2112 #endif
2113             int32Result(resultGpr, node);
2114             return;
2115         }
2116         case GeneratedOperandTypeUnknown:
2117             RELEASE_ASSERT(!m_compileOkay);
2118             return;
2119         }
2120         RELEASE_ASSERT_NOT_REACHED();
2121         return;
2122     }
2123     
2124     case BooleanUse: {
2125         SpeculateBooleanOperand op1(this, node->child1());
2126         GPRTemporary result(this, Reuse, op1);
2127         
2128         m_jit.move(op1.gpr(), result.gpr());
2129         m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2130         
2131         int32Result(result.gpr(), node);
2132         return;
2133     }
2134
2135     default:
2136         ASSERT(!m_compileOkay);
2137         return;
2138     }
2139 }
2140
2141 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2142 {
2143     if (!nodeCanSpeculateInt32(node->arithNodeFlags())) {
2144         // We know that this sometimes produces doubles. So produce a double every
2145         // time. This at least allows subsequent code to not have weird conditionals.
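        // The operand arrives as an int32 bit pattern. If it is negative when viewed as
        // int32 (i.e. the unsigned value is >= 2^31), add 2^32 after the conversion so
        // that, for example, the bit pattern 0xFFFFFFFF yields 4294967295.0 rather than -1.0.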
2146             
2147         SpeculateInt32Operand op1(this, node->child1());
2148         FPRTemporary result(this);
2149             
2150         GPRReg inputGPR = op1.gpr();
2151         FPRReg outputFPR = result.fpr();
2152             
2153         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2154             
2155         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2156         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2157         positive.link(&m_jit);
2158             
2159         doubleResult(outputFPR, node);
2160         return;
2161     }
2162
2163     SpeculateInt32Operand op1(this, node->child1());
2164     GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2165
2166     m_jit.move(op1.gpr(), result.gpr());
2167
2168     // Test that the operand is positive. This is a very special speculation check - we actually
2169     // use roll-forward speculation here, where if this fails, we jump to the baseline
2170     // instruction that follows us, rather than the one we're executing right now. We have
2171     // to do this because by this point, the original values necessary to compile whatever
2172     // operation the UInt32ToNumber originated from might be dead.
2173     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
2174
2175     int32Result(result.gpr(), node, op1.format());
2176 }
2177
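// Convert a speculated double to int32, OSR-exiting (with roll-forward recovery of the
// original double) if the conversion is not exact, including the negative zero case when
// the bytecode cannot ignore -0.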
2178 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2179 {
2180     SpeculateDoubleOperand op1(this, node->child1());
2181     FPRTemporary scratch(this);
2182     GPRTemporary result(this);
2183     
2184     FPRReg valueFPR = op1.fpr();
2185     FPRReg scratchFPR = scratch.fpr();
2186     GPRReg resultGPR = result.gpr();
2187
2188     JITCompiler::JumpList failureCases;
2189     bool negZeroCheck = !bytecodeCanIgnoreNegativeZero(node->arithNodeFlags());
2190     m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
2191     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
2192
2193     int32Result(resultGPR, node);
2194 }
2195
2196 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2197 {
2198     ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2199     
2200     if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2201         SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2202         FPRTemporary result(this);
2203         m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2204         doubleResult(result.fpr(), node);
2205         return;
2206     }
2207     
2208     JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2209     FPRTemporary result(this);
2210     
2211 #if USE(JSVALUE64)
2212     GPRTemporary temp(this);
2213
2214     GPRReg op1GPR = op1.gpr();
2215     GPRReg tempGPR = temp.gpr();
2216     FPRReg resultFPR = result.fpr();
2217     
2218     JITCompiler::Jump isInteger = m_jit.branch64(
2219         MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2220     
2221     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2222         if (node->flags() & NodeExitsForward) {
2223             forwardTypeCheck(
2224                 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2225                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
2226                 ValueRecovery::inGPR(op1GPR, DataFormatJS));
2227         } else {
2228             backwardTypeCheck(
2229                 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2230                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2231         }
2232     }
2233     
2234     m_jit.move(op1GPR, tempGPR);
2235     unboxDouble(tempGPR, resultFPR);
2236     JITCompiler::Jump done = m_jit.jump();
2237     
2238     isInteger.link(&m_jit);
2239     m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2240     done.link(&m_jit);
2241 #else
2242     FPRTemporary temp(this);
2243     
2244     GPRReg op1TagGPR = op1.tagGPR();
2245     GPRReg op1PayloadGPR = op1.payloadGPR();
2246     FPRReg tempFPR = temp.fpr();
2247     FPRReg resultFPR = result.fpr();
2248     
2249     JITCompiler::Jump isInteger = m_jit.branch32(
2250         MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2251     
2252     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2253         if (node->flags() & NodeExitsForward) {
2254             forwardTypeCheck(
2255                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2256                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
2257                 ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
2258         } else {
2259             backwardTypeCheck(
2260                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2261                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2262         }
2263     }
2264     
2265     unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2266     JITCompiler::Jump done = m_jit.jump();
2267     
2268     isInteger.link(&m_jit);
2269     m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2270     done.link(&m_jit);
2271 #endif
2272     
2273     doubleResult(resultFPR, node);
2274 }
2275
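// Round half up and clamp to the byte range. The caller truncates the result, so for
// example 12.6 becomes 13 and 254.7 becomes 255; values above 255 clamp to 255, while
// negative values and NaN clamp to 0.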
2276 static double clampDoubleToByte(double d)
2277 {
2278     d += 0.5;
2279     if (!(d > 0))
2280         d = 0;
2281     else if (d > 255)
2282         d = 255;
2283     return d;
2284 }
2285
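// Clamp an int32 in |result| to [0, 255] in place: negative values become 0, values
// above 255 become 255, and in-range values are left untouched.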
2286 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2287 {
2288     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2289     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2290     jit.xorPtr(result, result);
2291     MacroAssembler::Jump clamped = jit.jump();
2292     tooBig.link(&jit);
2293     jit.move(JITCompiler::TrustedImm32(255), result);
2294     clamped.link(&jit);
2295     inBounds.link(&jit);
2296 }
2297
2298 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2299 {
2300     // Unordered compare so we pick up NaN
2301     static const double zero = 0;
2302     static const double byteMax = 255;
2303     static const double half = 0.5;
2304     jit.loadDouble(&zero, scratch);
2305     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2306     jit.loadDouble(&byteMax, scratch);
2307     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2308     
2309     jit.loadDouble(&half, scratch);
2310     // FIXME: This should probably just use a floating point round!
2311     // https://bugs.webkit.org/show_bug.cgi?id=72054
2312     jit.addDouble(source, scratch);
2313     jit.truncateDoubleToInt32(scratch, result);   
2314     MacroAssembler::Jump truncatedInt = jit.jump();
2315     
2316     tooSmall.link(&jit);
2317     jit.xorPtr(result, result);
2318     MacroAssembler::Jump zeroed = jit.jump();
2319     
2320     tooBig.link(&jit);
2321     jit.move(JITCompiler::TrustedImm32(255), result);
2322     
2323     truncatedInt.link(&jit);
2324     zeroed.link(&jit);
2325
2326 }
2327
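// Emit the bounds check for a typed array access and return the out-of-bounds jump.
// The jump is left unset when no check is needed: PutByValAlias has already been
// checked, and a constant index that is statically below a foldable view's length
// needs no runtime check.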
2328 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2329 {
2330     if (node->op() == PutByValAlias)
2331         return JITCompiler::Jump();
2332     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2333         uint32_t length = view->length();
2334         Node* indexNode = m_jit.graph().child(node, 1).node();
2335         if (m_jit.graph().isInt32Constant(indexNode) && static_cast<uint32_t>(m_jit.graph().valueOfInt32Constant(indexNode)) < length)
2336             return JITCompiler::Jump();
2337         return m_jit.branch32(
2338             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2339     }
2340     return m_jit.branch32(
2341         MacroAssembler::AboveOrEqual, indexGPR,
2342         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2343 }
2344
2345 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2346 {
2347     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2348     if (!jump.isSet())
2349         return;
2350     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2351 }
2352
2353 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2354 {
2355     ASSERT(isInt(type));
2356     
2357     SpeculateCellOperand base(this, node->child1());
2358     SpeculateStrictInt32Operand property(this, node->child2());
2359     StorageOperand storage(this, node->child3());
2360
2361     GPRReg baseReg = base.gpr();
2362     GPRReg propertyReg = property.gpr();
2363     GPRReg storageReg = storage.gpr();
2364
2365     GPRTemporary result(this);
2366     GPRReg resultReg = result.gpr();
2367
2368     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2369
2370     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2371     switch (elementSize(type)) {
2372     case 1:
2373         if (isSigned(type))
2374             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2375         else
2376             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2377         break;
2378     case 2:
2379         if (isSigned(type))
2380             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2381         else
2382             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2383         break;
2384     case 4:
2385         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2386         break;
2387     default:
2388         CRASH();
2389     }
2390     if (elementSize(type) < 4 || isSigned(type)) {
2391         int32Result(resultReg, node);
2392         return;
2393     }
2394     
2395     ASSERT(elementSize(type) == 4 && !isSigned(type));
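    // A uint32 element may not fit in an int32. Either speculate that it does (OSR-exiting
    // when the sign bit is set), widen to Int52 on 64-bit, or fall back to a double result,
    // adding 2^32 whenever the loaded bit pattern is negative.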
2396     if (node->shouldSpeculateInt32()) {
2397         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2398         int32Result(resultReg, node);
2399         return;
2400     }
2401     
2402 #if USE(JSVALUE64)
2403     if (node->shouldSpeculateMachineInt()) {
2404         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2405         strictInt52Result(resultReg, node);
2406         return;
2407     }
2408 #endif
2409     
2410     FPRTemporary fresult(this);
2411     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2412     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2413     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2414     positive.link(&m_jit);
2415     doubleResult(fresult.fpr(), node);
2416 }
2417
2418 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2419 {
2420     ASSERT(isInt(type));
2421     
2422     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2423     GPRReg storageReg = storage.gpr();
2424     
2425     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2426     
2427     GPRTemporary value;
2428     GPRReg valueGPR = InvalidGPRReg;
2429     
2430     if (valueUse->isConstant()) {
2431         JSValue jsValue = valueOfJSConstant(valueUse.node());
2432         if (!jsValue.isNumber()) {
2433             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2434             noResult(node);
2435             return;
2436         }
2437         double d = jsValue.asNumber();
2438         if (isClamped(type)) {
2439             ASSERT(elementSize(type) == 1);
2440             d = clampDoubleToByte(d);
2441         }
2442         GPRTemporary scratch(this);
2443         GPRReg scratchReg = scratch.gpr();
2444         m_jit.move(Imm32(toInt32(d)), scratchReg);
2445         value.adopt(scratch);
2446         valueGPR = scratchReg;
2447     } else {
2448         switch (valueUse.useKind()) {
2449         case Int32Use: {
2450             SpeculateInt32Operand valueOp(this, valueUse);
2451             GPRTemporary scratch(this);
2452             GPRReg scratchReg = scratch.gpr();
2453             m_jit.move(valueOp.gpr(), scratchReg);
2454             if (isClamped(type)) {
2455                 ASSERT(elementSize(type) == 1);
2456                 compileClampIntegerToByte(m_jit, scratchReg);
2457             }
2458             value.adopt(scratch);
2459             valueGPR = scratchReg;
2460             break;
2461         }
2462             
2463 #if USE(JSVALUE64)
2464         case MachineIntUse: {
2465             SpeculateStrictInt52Operand valueOp(this, valueUse);
2466             GPRTemporary scratch(this);
2467             GPRReg scratchReg = scratch.gpr();
2468             m_jit.move(valueOp.gpr(), scratchReg);
2469             if (isClamped(type)) {
2470                 ASSERT(elementSize(type) == 1);
2471                 MacroAssembler::Jump inBounds = m_jit.branch64(
2472                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2473                 MacroAssembler::Jump tooBig = m_jit.branch64(
2474                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2475                 m_jit.move(TrustedImm32(0), scratchReg);
2476                 MacroAssembler::Jump clamped = m_jit.jump();
2477                 tooBig.link(&m_jit);
2478                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2479                 clamped.link(&m_jit);
2480                 inBounds.link(&m_jit);
2481             }
2482             value.adopt(scratch);
2483             valueGPR = scratchReg;
2484             break;
2485         }
2486 #endif // USE(JSVALUE64)
2487             
2488         case NumberUse: {
2489             if (isClamped(type)) {
2490                 ASSERT(elementSize(type) == 1);
2491                 SpeculateDoubleOperand valueOp(this, valueUse);
2492                 GPRTemporary result(this);
2493                 FPRTemporary floatScratch(this);
2494                 FPRReg fpr = valueOp.fpr();
2495                 GPRReg gpr = result.gpr();
2496                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2497                 value.adopt(result);
2498                 valueGPR = gpr;
2499             } else {
2500                 SpeculateDoubleOperand valueOp(this, valueUse);
2501                 GPRTemporary result(this);
2502                 FPRReg fpr = valueOp.fpr();
2503                 GPRReg gpr = result.gpr();
2504                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2505                 m_jit.xorPtr(gpr, gpr);
2506                 MacroAssembler::Jump fixed = m_jit.jump();
2507                 notNaN.link(&m_jit);
2508                 
2509                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2510                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2511                 
2512                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2513                 
2514                 fixed.link(&m_jit);
2515                 value.adopt(result);
2516                 valueGPR = gpr;
2517             }
2518             break;
2519         }
2520             
2521         default:
2522             RELEASE_ASSERT_NOT_REACHED();
2523             break;
2524         }
2525     }
2526     
2527     ASSERT_UNUSED(valueGPR, valueGPR != property);
2528     ASSERT(valueGPR != base);
2529     ASSERT(valueGPR != storageReg);
2530     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2531
2532     switch (elementSize(type)) {
2533     case 1:
2534         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2535         break;
2536     case 2:
2537         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2538         break;
2539     case 4:
2540         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2541         break;
2542     default:
2543         CRASH();
2544     }
2545     if (outOfBounds.isSet())
2546         outOfBounds.link(&m_jit);
2547     noResult(node);
2548 }
2549
2550 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2551 {
2552     ASSERT(isFloat(type));
2553     
2554     SpeculateCellOperand base(this, node->child1());
2555     SpeculateStrictInt32Operand property(this, node->child2());
2556     StorageOperand storage(this, node->child3());
2557
2558     GPRReg baseReg = base.gpr();
2559     GPRReg propertyReg = property.gpr();
2560     GPRReg storageReg = storage.gpr();
2561
2562     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2563
2564     FPRTemporary result(this);
2565     FPRReg resultReg = result.fpr();
2566     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2567     switch (elementSize(type)) {
2568     case 4:
2569         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2570         m_jit.convertFloatToDouble(resultReg, resultReg);
2571         break;
2572     case 8: {
2573         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2574         break;
2575     }
2576     default:
2577         RELEASE_ASSERT_NOT_REACHED();
2578     }
2579     
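    // Purify any NaN loaded from the array to the canonical quiet NaN, so that the result
    // is safe to box as a JSValue.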
2580     MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2581     static const double NaN = QNaN;
2582     m_jit.loadDouble(&NaN, resultReg);
2583     notNaN.link(&m_jit);
2584     
2585     doubleResult(resultReg, node);
2586 }
2587
2588 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2589 {
2590     ASSERT(isFloat(type));
2591     
2592     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2593     GPRReg storageReg = storage.gpr();
2594     
2595     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2596     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2597
2598     SpeculateDoubleOperand valueOp(this, valueUse);
2599     FPRTemporary scratch(this);
2600     FPRReg valueFPR = valueOp.fpr();
2601     FPRReg scratchFPR = scratch.fpr();
2602
2603     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2604     
2605     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2606     
2607     switch (elementSize(type)) {
2608     case 4: {
2609         m_jit.moveDouble(valueFPR, scratchFPR);
2610         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2611         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2612         break;
2613     }
2614     case 8:
2615         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2616         break;
2617     default:
2618         RELEASE_ASSERT_NOT_REACHED();
2619     }
2620     if (outOfBounds.isSet())
2621         outOfBounds.link(&m_jit);
2622     noResult(node);
2623 }
2624
2625 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2626 {
2627     // Check that prototype is an object.
2628     m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2629     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2630     
2631     // Initialize scratchReg with the value being checked.
2632     m_jit.move(valueReg, scratchReg);
2633     
2634     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2635     MacroAssembler::Label loop(&m_jit);
2636     m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
2637 #if USE(JSVALUE64)
2638     m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2639     MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2640     m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
2641 #else
2642     m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2643     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2644     m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2645 #endif
2646     
2647     // No match - result is false.
2648 #if USE(JSVALUE64)
2649     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2650 #else
2651     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2652 #endif
2653     MacroAssembler::Jump putResult = m_jit.jump();
2654     
2655     isInstance.link(&m_jit);
2656 #if USE(JSVALUE64)
2657     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2658 #else
2659     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2660 #endif
2661     
2662     putResult.link(&m_jit);
2663 }
2664
2665 void SpeculativeJIT::compileInstanceOf(Node* node)
2666 {
2667     if (node->child1().useKind() == UntypedUse) {
2668         // It might not be a cell. Speculate less aggressively.
2669         // Or: it might only be used once (i.e. by us), so we get zero benefit
2670         // from speculating any more aggressively than we absolutely need to.
2671         
2672         JSValueOperand value(this, node->child1());
2673         SpeculateCellOperand prototype(this, node->child2());
2674         GPRTemporary scratch(this);
2675         
2676         GPRReg prototypeReg = prototype.gpr();
2677         GPRReg scratchReg = scratch.gpr();
2678         
2679 #if USE(JSVALUE64)
2680         GPRReg valueReg = value.gpr();
2681         MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2682         m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2683 #else
2684         GPRReg valueTagReg = value.tagGPR();
2685         GPRReg valueReg = value.payloadGPR();
2686         MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2687         m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2688 #endif
2689
2690         MacroAssembler::Jump done = m_jit.jump();
2691         
2692         isCell.link(&m_jit);
2693         
2694         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2695         
2696         done.link(&m_jit);
2697
2698 #if USE(JSVALUE64)
2699         jsValueResult(scratchReg, node, DataFormatJSBoolean);
2700 #else
2701         booleanResult(scratchReg, node);
2702 #endif
2703         return;
2704     }
2705     
2706     SpeculateCellOperand value(this, node->child1());
2707     SpeculateCellOperand prototype(this, node->child2());
2708     
2709     GPRTemporary scratch(this);
2710     
2711     GPRReg valueReg = value.gpr();
2712     GPRReg prototypeReg = prototype.gpr();
2713     GPRReg scratchReg = scratch.gpr();
2714     
2715     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2716
2717 #if USE(JSVALUE64)
2718     jsValueResult(scratchReg, node, DataFormatJSBoolean);
2719 #else
2720     booleanResult(scratchReg, node);
2721 #endif
2722 }
2723
2724 void SpeculativeJIT::compileAdd(Node* node)
2725 {
2726     switch (node->binaryUseKind()) {
2727     case Int32Use: {
2728         if (isNumberConstant(node->child1().node())) {
2729             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2730             SpeculateInt32Operand op2(this, node->child2());
2731             GPRTemporary result(this);
2732
2733             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2734                 m_jit.move(op2.gpr(), result.gpr());
2735                 m_jit.add32(Imm32(imm1), result.gpr());
2736             } else
2737                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2738
2739             int32Result(result.gpr(), node);
2740             return;
2741         }
2742         
2743         if (isNumberConstant(node->child2().node())) {
2744             SpeculateInt32Operand op1(this, node->child1());
2745             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2746             GPRTemporary result(this);
2747                 
2748             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2749                 m_jit.move(op1.gpr(), result.gpr());
2750                 m_jit.add32(Imm32(imm2), result.gpr());
2751             } else
2752                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2753
2754             int32Result(result.gpr(), node);
2755             return;
2756         }
2757                 
2758         SpeculateInt32Operand op1(this, node->child1());
2759         SpeculateInt32Operand op2(this, node->child2());
2760         GPRTemporary result(this, Reuse, op1, op2);
2761
2762         GPRReg gpr1 = op1.gpr();
2763         GPRReg gpr2 = op2.gpr();
2764         GPRReg gprResult = result.gpr();
2765
2766         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2767             if (gpr1 == gprResult)
2768                 m_jit.add32(gpr2, gprResult);
2769             else {
2770                 m_jit.move(gpr2, gprResult);
2771                 m_jit.add32(gpr1, gprResult);
2772             }
2773         } else {
2774             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
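            // If the add overflows after clobbering one of its operands (gprResult
            // aliases gpr1 or gpr2), the SpeculationRecovery records which operand
            // survived so the OSR exit path can subtract it back out and recover the
            // original value before exiting.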
2775                 
2776             if (gpr1 == gprResult)
2777                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2778             else if (gpr2 == gprResult)
2779                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2780             else
2781                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2782         }
2783
2784         int32Result(gprResult, node);
2785         return;
2786     }
2787         
2788 #if USE(JSVALUE64)
2789     case MachineIntUse: {
2790         // Will we need an overflow check? If we can prove that neither input can be
2791         // Int52 then the overflow check will not be necessary.
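        // (Two operands in int32 range sum to at most 33 significant bits, which
        // always fits in 52 bits, so a plain add64 cannot overflow the int52 range.)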
2792         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2793             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2794             SpeculateWhicheverInt52Operand op1(this, node->child1());
2795             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2796             GPRTemporary result(this, Reuse, op1);
2797             m_jit.move(op1.gpr(), result.gpr());
2798             m_jit.add64(op2.gpr(), result.gpr());
2799             int52Result(result.gpr(), node, op1.format());
2800             return;
2801         }
2802         
2803         SpeculateInt52Operand op1(this, node->child1());
2804         SpeculateInt52Operand op2(this, node->child2());
2805         GPRTemporary result(this);
2806         m_jit.move(op1.gpr(), result.gpr());
2807         speculationCheck(
2808             Int52Overflow, JSValueRegs(), 0,
2809             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2810         int52Result(result.gpr(), node);
2811         return;
2812     }
2813 #endif // USE(JSVALUE64)
2814     
2815     case NumberUse: {
2816         SpeculateDoubleOperand op1(this, node->child1());
2817         SpeculateDoubleOperand op2(this, node->child2());
2818         FPRTemporary result(this, op1, op2);
2819
2820         FPRReg reg1 = op1.fpr();
2821         FPRReg reg2 = op2.fpr();
2822         m_jit.addDouble(reg1, reg2, result.fpr());
2823
2824         doubleResult(result.fpr(), node);
2825         return;
2826     }
2827         
2828     case UntypedUse: {
2829         RELEASE_ASSERT(node->op() == ValueAdd);
2830         compileValueAdd(node);
2831         return;
2832     }
2833         
2834     default:
2835         RELEASE_ASSERT_NOT_REACHED();
2836         break;
2837     }
2838 }
2839
2840 void SpeculativeJIT::compileMakeRope(Node* node)
2841 {
2842     ASSERT(node->child1().useKind() == KnownStringUse);
2843     ASSERT(node->child2().useKind() == KnownStringUse);
2844     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2845     
2846     SpeculateCellOperand op1(this, node->child1());
2847     SpeculateCellOperand op2(this, node->child2());
2848     SpeculateCellOperand op3(this, node->child3());
2849     GPRTemporary result(this);
2850     GPRTemporary allocator(this);
2851     GPRTemporary scratch(this);
2852     
2853     GPRReg opGPRs[3];
2854     unsigned numOpGPRs;
2855     opGPRs[0] = op1.gpr();
2856     opGPRs[1] = op2.gpr();
2857     if (node->child3()) {
2858         opGPRs[2] = op3.gpr();
2859         numOpGPRs = 3;
2860     } else {
2861         opGPRs[2] = InvalidGPRReg;
2862         numOpGPRs = 2;
2863     }
2864     GPRReg resultGPR = result.gpr();
2865     GPRReg allocatorGPR = allocator.gpr();
2866     GPRReg scratchGPR = scratch.gpr();
2867     
2868     JITCompiler::JumpList slowPath;
2869     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2870     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2871     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2872         
2873     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2874     for (unsigned i = 0; i < numOpGPRs; ++i)
2875         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2876     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2877         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
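    // The rope is 8-bit only if every fiber is 8-bit, so AND the fibers' flags
    // together; its length is the sum of the fiber lengths.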
2878     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2879     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2880     for (unsigned i = 1; i < numOpGPRs; ++i) {
2881         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2882         m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
2883     }
2884     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2885     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2886     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2887     
2888     switch (numOpGPRs) {
2889     case 2:
2890         addSlowPathGenerator(slowPathCall(
2891             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2892         break;
2893     case 3:
2894         addSlowPathGenerator(slowPathCall(
2895             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
2896         break;
2897     default:
2898         RELEASE_ASSERT_NOT_REACHED();
2899         break;
2900     }
2901         
2902     cellResult(resultGPR, node);
2903 }
2904
2905 void SpeculativeJIT::compileArithSub(Node* node)
2906 {
2907     switch (node->binaryUseKind()) {
2908     case Int32Use: {
2909         if (isNumberConstant(node->child2().node())) {
2910             SpeculateInt32Operand op1(this, node->child1());
2911             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2912             GPRTemporary result(this);
2913
2914             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2915                 m_jit.move(op1.gpr(), result.gpr());
2916                 m_jit.sub32(Imm32(imm2), result.gpr());
2917             } else {
2918                 GPRTemporary scratch(this);
2919                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2920             }
2921
2922             int32Result(result.gpr(), node);
2923             return;
2924         }
2925             
2926         if (isNumberConstant(node->child1().node())) {
2927             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2928             SpeculateInt32Operand op2(this, node->child2());
2929             GPRTemporary result(this);
2930                 
2931             m_jit.move(Imm32(imm1), result.gpr());
2932             if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
2933                 m_jit.sub32(op2.gpr(), result.gpr());
2934             else
2935                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2936                 
2937             int32Result(result.gpr(), node);
2938             return;
2939         }
2940             
2941         SpeculateInt32Operand op1(this, node->child1());
2942         SpeculateInt32Operand op2(this, node->child2());
2943         GPRTemporary result(this);
2944
2945         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2946             m_jit.move(op1.gpr(), result.gpr());
2947             m_jit.sub32(op2.gpr(), result.gpr());
2948         } else
2949             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2950
2951         int32Result(result.gpr(), node);
2952         return;
2953     }
2954         
2955 #if USE(JSVALUE64)
2956     case MachineIntUse: {
2957         // Will we need an overflow check? If we can prove that neither input can be
2958         // Int52 then the overflow check will not be necessary.
2959         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2960             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2961             SpeculateWhicheverInt52Operand op1(this, node->child1());
2962             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2963             GPRTemporary result(this, Reuse, op1);
2964             m_jit.move(op1.gpr(), result.gpr());
2965             m_jit.sub64(op2.gpr(), result.gpr());
2966             int52Result(result.gpr(), node, op1.format());
2967             return;
2968         }
2969         
2970         SpeculateInt52Operand op1(this, node->child1());
2971         SpeculateInt52Operand op2(this, node->child2());
2972         GPRTemporary result(this);
2973         m_jit.move(op1.gpr(), result.gpr());
2974         speculationCheck(
2975             Int52Overflow, JSValueRegs(), 0,
2976             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2977         int52Result(result.gpr(), node);
2978         return;
2979     }
2980 #endif // USE(JSVALUE64)
2981
2982     case NumberUse: {
2983         SpeculateDoubleOperand op1(this, node->child1());
2984         SpeculateDoubleOperand op2(this, node->child2());
2985         FPRTemporary result(this, op1);
2986
2987         FPRReg reg1 = op1.fpr();
2988         FPRReg reg2 = op2.fpr();
2989         m_jit.subDouble(reg1, reg2, result.fpr());
2990
2991         doubleResult(result.fpr(), node);
2992         return;
2993     }
2994         
2995     default:
2996         RELEASE_ASSERT_NOT_REACHED();
2997         return;
2998     }
2999 }
3000
3001 void SpeculativeJIT::compileArithNegate(Node* node)
3002 {
3003     switch (node->child1().useKind()) {
3004     case Int32Use: {
3005         SpeculateInt32Operand op1(this, node->child1());
3006         GPRTemporary result(this);
3007
3008         m_jit.move(op1.gpr(), result.gpr());
3009
3010         // Note: there is no notion of being not used as a number, but someone
3011         // caring about negative zero.
3012         
3013         if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3014             m_jit.neg32(result.gpr());
3015         else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
3016             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3017         else {
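            // Zero under the 0x7fffffff mask means the value is 0 or -2^31: negating
            // 0 would produce -0 (not representable as an int32) and negating -2^31
            // overflows, so both cases must bail out.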
3018             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3019             m_jit.neg32(result.gpr());
3020         }
3021
3022         int32Result(result.gpr(), node);
3023         return;
3024     }
3025
3026 #if USE(JSVALUE64)
3027     case MachineIntUse: {
3028         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3029             SpeculateWhicheverInt52Operand op1(this, node->child1());
3030             GPRTemporary result(this);
3031             GPRReg op1GPR = op1.gpr();
3032             GPRReg resultGPR = result.gpr();
3033             m_jit.move(op1GPR, resultGPR);
3034             m_jit.neg64(resultGPR);
3035             if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3036                 speculationCheck(
3037                     NegativeZero, JSValueRegs(), 0,
3038                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3039             }
3040             int52Result(resultGPR, node, op1.format());
3041             return;
3042         }
3043         
3044         SpeculateInt52Operand op1(this, node->child1());
3045         GPRTemporary result(this);
3046         GPRReg op1GPR = op1.gpr();
3047         GPRReg resultGPR = result.gpr();
3048         m_jit.move(op1GPR, resultGPR);
3049         speculationCheck(
3050             Int52Overflow, JSValueRegs(), 0,
3051             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3052         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3053             speculationCheck(
3054                 NegativeZero, JSValueRegs(), 0,
3055                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3056         }
3057         int52Result(resultGPR, node);
3058         return;
3059     }
3060 #endif // USE(JSVALUE64)
3061         
3062     case NumberUse: {
3063         SpeculateDoubleOperand op1(this, node->child1());
3064         FPRTemporary result(this);
3065         
3066         m_jit.negateDouble(op1.fpr(), result.fpr());
3067         
3068         doubleResult(result.fpr(), node);
3069         return;
3070     }
3071         
3072     default:
3073         RELEASE_ASSERT_NOT_REACHED();
3074         return;
3075     }
3076 }
3077 void SpeculativeJIT::compileArithIMul(Node* node)
3078 {
3079     SpeculateInt32Operand op1(this, node->child1());
3080     SpeculateInt32Operand op2(this, node->child2());
3081     GPRTemporary result(this);
3082
3083     GPRReg reg1 = op1.gpr();
3084     GPRReg reg2 = op2.gpr();
3085
3086     m_jit.move(reg1, result.gpr());
3087     m_jit.mul32(reg2, result.gpr());
3088     int32Result(result.gpr(), node);
3089     return;
3090 }
3091
3092 void SpeculativeJIT::compileArithMul(Node* node)
3093 {
3094     switch (node->binaryUseKind()) {
3095     case Int32Use: {
3096         SpeculateInt32Operand op1(this, node->child1());
3097         SpeculateInt32Operand op2(this, node->child2());
3098         GPRTemporary result(this);
3099
3100         GPRReg reg1 = op1.gpr();
3101         GPRReg reg2 = op2.gpr();
3102
3103         // We can perform truncated multiplications if we get to this point, because if the
3104         // fixup phase could not prove that it would be safe, it would have turned us into
3105         // a double multiplication.
3106         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3107             m_jit.move(reg1, result.gpr());
3108             m_jit.mul32(reg2, result.gpr());
3109         } else {
3110             speculationCheck(
3111                 Overflow, JSValueRegs(), 0,
3112                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3113         }
3114             
3115         // Check for negative zero, if the users of this node care about such things.
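        // For example, in JS (-1 * 0) is -0; a zero int32 result with a negative
        // operand would lose the sign, so we bail out in that case.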
3116         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3117             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3118             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3119             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3120             resultNonZero.link(&m_jit);
3121         }
3122
3123         int32Result(result.gpr(), node);
3124         return;
3125     }
3126     
3127 #if USE(JSVALUE64)   
3128     case MachineIntUse: {
3129         // This is super clever. We want to do an int52 multiplication and check the
3130         // int52 overflow bit. There is no direct hardware support for this, but we do
3131         // have the ability to do an int64 multiplication and check the int64 overflow
3132         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3133         // registers, with the high 12 bits being sign-extended. We can do:
3134         //
3135         //     (a * (b << 12))
3136         //
3137         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3138         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3139         // multiplication overflows is identical to whether the 'a * b' 52-bit
3140         // multiplication overflows.
3141         //
3142         // In our nomenclature, this is:
3143         //
3144         //     strictInt52(a) * int52(b) => int52
3145         //
3146         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3147         // bits.
3148         //
3149         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3150         // we just do whatever is more convenient for op1 and have op2 do the
3151         // opposite. This ensures that we do at most one shift.
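        // Worked example: with a = 3 held unshifted (strict) and b = 5 held in the
        // left-shifted form, the 64-bit multiply computes 3 * (5 << 12) =
        // (3 * 5) << 12, which is already the shifted Int52 representation of 15.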
3152
3153         SpeculateWhicheverInt52Operand op1(this, node->child1());
3154         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3155         GPRTemporary result(this);
3156         
3157         GPRReg op1GPR = op1.gpr();
3158         GPRReg op2GPR = op2.gpr();
3159         GPRReg resultGPR = result.gpr();
3160         
3161         m_jit.move(op1GPR, resultGPR);
3162         speculationCheck(
3163             Int52Overflow, JSValueRegs(), 0,
3164             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3165         
3166         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3167             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3168                 MacroAssembler::NonZero, resultGPR);
3169             speculationCheck(
3170                 NegativeZero, JSValueRegs(), 0,
3171                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3172             speculationCheck(
3173                 NegativeZero, JSValueRegs(), 0,
3174                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3175             resultNonZero.link(&m_jit);
3176         }
3177         
3178         int52Result(resultGPR, node);
3179         return;
3180     }
3181 #endif // USE(JSVALUE64)
3182         
3183     case NumberUse: {
3184         SpeculateDoubleOperand op1(this, node->child1());
3185         SpeculateDoubleOperand op2(this, node->child2());
3186         FPRTemporary result(this, op1, op2);
3187         
3188         FPRReg reg1 = op1.fpr();
3189         FPRReg reg2 = op2.fpr();
3190         
3191         m_jit.mulDouble(reg1, reg2, result.fpr());
3192         
3193         doubleResult(result.fpr(), node);
3194         return;
3195     }
3196         
3197     default:
3198         RELEASE_ASSERT_NOT_REACHED();
3199         return;
3200     }
3201 }
3202
3203 void SpeculativeJIT::compileArithDiv(Node* node)
3204 {
3205     switch (node->binaryUseKind()) {
3206     case Int32Use: {
3207 #if CPU(X86) || CPU(X86_64)
3208         SpeculateInt32Operand op1(this, node->child1());
3209         SpeculateInt32Operand op2(this, node->child2());
3210         GPRTemporary eax(this, X86Registers::eax);
3211         GPRTemporary edx(this, X86Registers::edx);
3212         GPRReg op1GPR = op1.gpr();
3213         GPRReg op2GPR = op2.gpr();
3214     
3215         GPRReg op2TempGPR;
3216         GPRReg temp;
3217         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3218             op2TempGPR = allocate();
3219             temp = op2TempGPR;
3220         } else {
3221             op2TempGPR = InvalidGPRReg;
3222             if (op1GPR == X86Registers::eax)
3223                 temp = X86Registers::edx;
3224             else
3225                 temp = X86Registers::eax;
3226         }
3227     
3228         ASSERT(temp != op1GPR);
3229         ASSERT(temp != op2GPR);
3230     
3231         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3232     
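        // Unsigned trick: op2 + 1 is above 1 exactly when op2 is neither 0 nor -1,
        // the only denominators that need the special handling below (division by
        // zero and -2^31 / -1).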
3233         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3234     
3235         JITCompiler::JumpList done;
3236         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3237             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3238             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3239         } else {
3240             // This is the case where we convert the result to an int after we're done, and we
3241             // already know that the denominator is either -1 or 0. So, if the denominator is
3242             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3243             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3244             // are happy to fall through to a normal division, since we're just dividing
3245             // something by negative 1.
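            // (This arises for code like (a / b) | 0, where only the truncated
            // result is observable.)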
3246         
3247             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3248             m_jit.move(TrustedImm32(0), eax.gpr());
3249             done.append(m_jit.jump());
3250         
3251             notZero.link(&m_jit);
3252             JITCompiler::Jump notNeg2ToThe31 =
3253                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3254             m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
3255             done.append(m_jit.jump());
3256         
3257             notNeg2ToThe31.link(&m_jit);
3258         }
3259     
3260         safeDenominator.link(&m_jit);
3261     
3262         // If the user cares about negative zero, then speculate that we're not about
3263         // to produce negative zero.
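        // For example, in JS (0 / -5) is -0, so a zero numerator with a negative
        // denominator has to bail out of the int32 path.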
3264         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3265             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3266             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3267             numeratorNonZero.link(&m_jit);
3268         }
3269     
3270         if (op2TempGPR != InvalidGPRReg) {
3271             m_jit.move(op2GPR, op2TempGPR);
3272             op2GPR = op2TempGPR;
3273         }
3274             
3275         m_jit.move(op1GPR, eax.gpr());
3276         m_jit.assembler().cdq();
3277         m_jit.assembler().idivl_r(op2GPR);
3278             
3279         if (op2TempGPR != InvalidGPRReg)
3280             unlock(op2TempGPR);
3281
3282         // Check that there was no remainder. If there had been, then we'd be obligated to
3283         // produce a double result instead.
3284         if (bytecodeUsesAsNumber(node->arithNodeFlags()))
3285             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3286         
3287         done.link(&m_jit);
3288         int32Result(eax.gpr(), node);
3289 #elif CPU(APPLE_ARMV7S)
3290         SpeculateInt32Operand op1(this, node->child1());
3291         SpeculateInt32Operand op2(this, node->child2());
3292         GPRReg op1GPR = op1.gpr();
3293         GPRReg op2GPR = op2.gpr();
3294         GPRTemporary quotient(this);
3295         GPRTemporary multiplyAnswer(this);
3296
3297         // If the user cares about negative zero, then speculate that we're not about
3298         // to produce negative zero.
3299         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3300             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3301             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3302             numeratorNonZero.link(&m_jit);
3303         }
3304
3305         m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3306
3307         // Check that there was no remainder. If there had been, then we'd be obligated to
3308         // produce a double result instead.
3309         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3310             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3311             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3312         }
3313
3314         int32Result(quotient.gpr(), node);
3315 #elif CPU(ARM64)
3316         SpeculateInt32Operand op1(this, node->child1());
3317         SpeculateInt32Operand op2(this, node->child2());
3318         GPRReg op1GPR = op1.gpr();
3319         GPRReg op2GPR = op2.gpr();
3320         GPRTemporary quotient(this);
3321         GPRTemporary multiplyAnswer(this);
3322
3323         // If the user cares about negative zero, then speculate that we're not about
3324         // to produce negative zero.
3325         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3326             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3327             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3328             numeratorNonZero.link(&m_jit);
3329         }
3330
3331         m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
3332
3333         // Check that there was no remainder. If there had been, then we'd be obligated to
3334         // produce a double result instead.
3335         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3336             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3337             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3338         }
3339
3340         int32Result(quotient.gpr(), node);
3341 #else
3342         RELEASE_ASSERT_NOT_REACHED();
3343 #endif
3344         break;
3345     }
3346         
3347     case NumberUse: {
3348         SpeculateDoubleOperand op1(this, node->child1());
3349         SpeculateDoubleOperand op2(this, node->child2());
3350         FPRTemporary result(this, op1);
3351         
3352         FPRReg reg1 = op1.fpr();
3353         FPRReg reg2 = op2.fpr();
3354         m_jit.divDouble(reg1, reg2, result.fpr());
3355         
3356         doubleResult(result.fpr(), node);
3357         break;
3358     }
3359         
3360     default:
3361         RELEASE_ASSERT_NOT_REACHED();
3362         break;
3363     }
3364 }
3365
3366 void SpeculativeJIT::compileArithMod(Node* node)
3367 {
3368     switch (node->binaryUseKind()) {
3369     case Int32Use: {
3370         // In the fast path, the dividend value could be the final result
3371         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3372         SpeculateStrictInt32Operand op1(this, node->child1());
3373         
3374         if (isInt32Constant(node->child2().node())) {
3375             int32_t divisor = valueOfInt32Constant(node->child2().node());
3376             if (divisor > 1 && hasOneBitSet(divisor)) {
3377                 unsigned logarithm = WTF::fastLog2(divisor);
3378                 GPRReg dividendGPR = op1.gpr();
3379                 GPRTemporary result(this);
3380                 GPRReg resultGPR = result.gpr();
3381
3382                 // This is what LLVM generates. It's pretty crazy. Here's my
3383                 // attempt at understanding it.
3384                 
3385                 // First, compute either divisor - 1, or 0, depending on whether
3386                 // the dividend is negative:
3387                 //
3388                 // If dividend < 0:  resultGPR = divisor - 1
3389                 // If dividend >= 0: resultGPR = 0
3390                 m_jit.move(dividendGPR, resultGPR);
3391                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3392                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3393                 
3394                 // Add in the dividend, so that:
3395                 //
3396                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3397                 // If dividend >= 0: resultGPR = dividend
3398                 m_jit.add32(dividendGPR, resultGPR);
3399                 
3400                 // Mask so as to only get the *high* bits. This rounds down
3401                 // (towards negative infinity) resultGPR to the nearest multiple
3402                 // of divisor, so that:
3403                 //
3404                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3405                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3406                 //
3407                 // Note that this can be simplified to:
3408                 //
3409                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3410                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3411                 //
3412                 // Note that if the dividend is negative, resultGPR will also be negative.
3413                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3414                 // zero, because of how things are conditionalized.
3415                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3416                 
3417                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3418                 //
3419                 // resultGPR = dividendGPR - resultGPR
3420                 m_jit.neg32(resultGPR);
3421                 m_jit.add32(dividendGPR, resultGPR);
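                // Worked example with divisor = 8 (logarithm = 3), dividend = -13:
                //   -13 >> 31 = -1; (unsigned) -1 >> 29 = 7 (= divisor - 1)
                //   7 + (-13) = -6; -6 & -8 = -8 (the rounded-toward-zero multiple)
                //   remainder = -13 - (-8) = -5, matching JS (-13 % 8).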
3422                 
3423                 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3424                     // Check that we're not about to create negative zero.
3425                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3426                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3427                     numeratorPositive.link(&m_jit);
3428                 }
3429
3430                 int32Result(resultGPR, node);
3431                 return;
3432             }
3433         }
3434         
3435 #if CPU(X86) || CPU(X86_64)
3436         if (isInt32Constant(node->child2().node())) {
3437             int32_t divisor = valueOfInt32Constant(node->child2().node());
3438             if (divisor && divisor != -1) {
3439                 GPRReg op1Gpr = op1.gpr();
3440
3441                 GPRTemporary eax(this, X86Registers::eax);
3442                 GPRTemporary edx(this, X86Registers::edx);
3443                 GPRTemporary scratch(this);
3444                 GPRReg scratchGPR = scratch.gpr();
3445
3446                 GPRReg op1SaveGPR;
3447                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3448                     op1SaveGPR = allocate();
3449                     ASSERT(op1Gpr != op1SaveGPR);
3450                     m_jit.move(op1Gpr, op1SaveGPR);
3451                 } else
3452                     op1SaveGPR = op1Gpr;
3453                 ASSERT(op1SaveGPR != X86Registers::eax);
3454                 ASSERT(op1SaveGPR != X86Registers::edx);
3455
3456                 m_jit.move(op1Gpr, eax.gpr());
3457                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3458                 m_jit.assembler().cdq();
3459                 m_jit.assembler().idivl_r(scratchGPR);
3460                 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3461                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3462                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3463                     numeratorPositive.link(&m_jit);
3464                 }
3465             
3466                 if (op1SaveGPR != op1Gpr)
3467                     unlock(op1SaveGPR);
3468
3469                 int32Result(edx.gpr(), node);
3470                 return;
3471             }
3472         }
3473 #endif
3474
3475         SpeculateInt32Operand op2(this, node->child2());
3476 #if CPU(X86) || CPU(X86_64)
3477         GPRTemporary eax(this, X86Registers::eax);
3478         GPRTemporary edx(this, X86Registers::edx);
3479         GPRReg op1GPR = op1.gpr();
3480         GPRReg op2GPR = op2.gpr();
3481     
3482         GPRReg op2TempGPR;
3483         GPRReg temp;
3484         GPRReg op1SaveGPR;
3485     
3486         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3487             op2TempGPR = allocate();
3488             temp = op2TempGPR;
3489         } else {
3490             op2TempGPR = InvalidGPRReg;
3491             if (op1GPR == X86Registers::eax)
3492                 temp = X86Registers::edx;
3493             else
3494                 temp = X86Registers::eax;
3495         }
3496     
3497         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3498             op1SaveGPR = allocate();
3499             ASSERT(op1GPR != op1SaveGPR);
3500             m_jit.move(op1GPR, op1SaveGPR);
3501         } else
3502             op1SaveGPR = op1GPR;
3503     
3504         ASSERT(temp != op1GPR);
3505         ASSERT(temp != op2GPR);
3506         ASSERT(op1SaveGPR != X86Registers::eax);
3507         ASSERT(op1SaveGPR != X86Registers::edx);
3508     
3509         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3510     
3511         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3512     
3513         JITCompiler::JumpList done;
3514         
3515         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3516         // separate case for that. But it probably doesn't matter so much.
3517         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3518             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3519             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3520         } else {
3521             // This is the case where we convert the result to an int after we're done, and we
3522             // already know that the denominator is either -1 or 0. So, if the denominator is
3523             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3524             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3525             // happy to fall through to a normal division, since we're just dividing something
3526             // by negative 1.
3527         
3528             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3529             m_jit.move(TrustedImm32(0), edx.gpr());
3530             done.append(m_jit.jump());
3531         
3532             notZero.link(&m_jit);
3533             JITCompiler::Jump notNeg2ToThe31 =
3534                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3535             m_jit.move(TrustedImm32(0), edx.gpr());
3536             done.append(m_jit.jump());
3537         
3538             notNeg2ToThe31.link(&m_jit);
3539         }
3540         
3541         safeDenominator.link(&m_jit);
3542             
3543         if (op2TempGPR != InvalidGPRReg) {
3544             m_jit.move(op2GPR, op2TempGPR);
3545             op2GPR = op2TempGPR;
3546         }
3547             
3548         m_jit.move(op1GPR, eax.gpr());
3549         m_jit.assembler().cdq();
3550         m_jit.assembler().idivl_r(op2GPR);
3551             
3552         if (op2TempGPR != InvalidGPRReg)
3553             unlock(op2TempGPR);
3554
3555         // Check that we're not about to create negative zero.
3556         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3557             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3558             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3559             numeratorPositive.link(&m_jit);
3560         }
3561     
3562         if (op1SaveGPR != op1GPR)
3563             unlock(op1SaveGPR);
3564             
3565         done.link(&m_jit);
3566         int32Result(edx.gpr(), node);
3567
3568 #elif CPU(APPLE_ARMV7S)
3569         GPRTemporary temp(this);
3570         GPRTemporary quotientThenRemainder(this);
3571         GPRTemporary multiplyAnswer(this);
3572         GPRReg dividendGPR = op1.gpr();
3573         GPRReg divisorGPR = op2.gpr();
3574         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3575         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3576
3577         m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3578         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3579         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3580
3581         // If the user cares about negative zero, then speculate that we're not about
3582         // to produce negative zero.
3583         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3584             // Check that we're not about to create negative zero.
3585             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3586             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3587             numeratorPositive.link(&m_jit);
3588         }
3589
3590         int32Result(quotientThenRemainderGPR, node);
3591 #elif CPU(ARM64)
3592         GPRTemporary temp(this);
3593         GPRTemporary quotientThenRemainder(this);
3594         GPRTemporary multiplyAnswer(this);
3595         GPRReg dividendGPR = op1.gpr();
3596         GPRReg divisorGPR = op2.gpr();
3597         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3598         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3599
3600         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3601         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3602         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3603
3604         // If the user cares about negative zero, then speculate that we're not about
3605         // to produce negative zero.
3606         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3607             // Check that we're not about to create negative zero.
3608             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3609             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3610             numeratorPositive.link(&m_jit);
3611         }
3612
3613         int32Result(quotientThenRemainderGPR, node);
3614 #else // not architecture that can do integer division
3615         RELEASE_ASSERT_NOT_REACHED();
3616 #endif
3617         return;
3618     }
3619         
3620     case NumberUse: {
3621         SpeculateDoubleOperand op1(this, node->child1());
3622         SpeculateDoubleOperand op2(this, node->child2());
3623         
3624         FPRReg op1FPR = op1.fpr();
3625         FPRReg op2FPR = op2.fpr();
3626         
3627         flushRegisters();
3628         
3629         FPRResult result(this);
3630         
3631         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3632         
3633         doubleResult(result.fpr(), node);
3634         return;
3635     }
3636         
3637     default:
3638         RELEASE_ASSERT_NOT_REACHED();
3639         return;
3640     }
3641 }
3642
3643 // Returns true if the compare is fused with a subsequent branch.
3644 bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
3645 {
3646     if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
3647         return true;
3648
3649     if (node->isBinaryUseKind(Int32Use)) {
3650         compileInt32Compare(node, condition);
3651         return false;
3652     }
3653     
3654 #if USE(JSVALUE64)
3655     if (node->isBinaryUseKind(MachineIntUse)) {
3656         compileInt52Compare(node, condition);
3657         return false;
3658     }
3659 #endif // USE(JSVALUE64)
3660     
3661     if (node->isBinaryUseKind(NumberUse)) {
3662         compileDoubleCompare(node, doubleCondition);
3663         return false;
3664     }
3665     
3666     if (node->op() == CompareEq) {
3667         if (node->isBinaryUseKind(StringUse)) {
3668             compileStringEquality(node);
3669             return false;
3670         }
3671         
3672         if (node->isBinaryUseKind(BooleanUse)) {
3673             compileBooleanCompare(node, condition);
3674             return false;
3675         }
3676
3677         if (node->isBinaryUseKind(StringIdentUse)) {
3678             compileStringIdentEquality(node);
3679             return false;
3680         }
3681         
3682         if (node->isBinaryUseKind(ObjectUse)) {
3683             compileObjectEquality(node);
3684             return false;
3685         }
3686         
3687         if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
3688             compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
3689             return false;
3690         }
3691         
3692         if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
3693             compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
3694             return false;
3695         }
3696     }
3697     
3698     nonSpeculativeNonPeepholeCompare(node, condition, operation);
3699     return false;
3700 }
3701
3702 bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
3703 {
3704     JSValueOperand op1(this, value);
3705     
3706     // FIXME: This code is wrong for the case that the constant is null or undefined,
3707     // and the value is an object that MasqueradesAsUndefined.
3708     // https://bugs.webkit.org/show_bug.cgi?id=109487
3709     
3710     unsigned branchIndexInBlock = detectPeepHoleBranch();
3711     if (branchIndexInBlock != UINT_MAX) {
3712         Node* branchNode = m_block->at(branchIndexInBlock);
3713         BasicBlock* taken = branchNode->takenBlock();
3714         BasicBlock* notTaken = branchNode->notTakenBlock();
3715         MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
3716         
3717         // The branch instruction will branch to the taken block.
3718         // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
3719         if (taken == nextBlock()) {
3720             condition = MacroAssembler::NotEqual;
3721             BasicBlock* tmp = taken;
3722             taken = notTaken;
3723             notTaken = tmp;
3724         }
3725
3726 #if USE(JSVALUE64)
3727         branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
3728 #else
3729         GPRReg payloadGPR = op1.payloadGPR();
3730         GPRReg tagGPR = op1.tagGPR();
3731         if (condition == MacroAssembler::Equal) {
3732             // Drop down if not equal, go elsewhere if equal.
3733             MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
3734             branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
3735             notEqual.link(&m_jit);
3736         } else {
3737             // Drop down if equal, go elsewhere if not equal.
3738             branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
3739             branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
3740         }
3741 #endif
3742         
3743         jump(notTaken);
3744         
3745         use(node->child1());
3746         use(node->child2());
3747         m_indexInBlock = branchIndexInBlock;
3748         m_currentNode = branchNode;
3749         return true;
3750     }
3751     
3752     GPRTemporary result(this);
3753     
3754 #if USE(JSVALUE64)
3755     GPRReg op1GPR = op1.gpr();
3756     GPRReg resultGPR = result.gpr();
3757     m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
3758     MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
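    // The boxed encodings of false and true differ only in the low bit, so OR-ing
    // in 1 turns the false loaded above into true on the equal path.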
3759     m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
3760     notEqual.link(&m_jit);
3761     jsValueResult(resultGPR, node, DataFormatJSBoolean);
3762 #else
3763     GPRReg op1PayloadGPR = op1.payloadGPR();
3764     GPRReg op1TagGPR = op1.tagGPR();
3765     GPRReg resultGPR = result.gpr();
3766     m_jit.move(TrustedImm32(0), resultGPR);
3767     MacroAssembler::JumpList notEqual;
3768     notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
3769     notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
3770     m_jit.move(TrustedImm32(1), resultGPR);
3771     notEqual.link(&m_jit);
3772     booleanResult(resultGPR, node);
3773 #endif
3774     
3775     return false;
3776 }
3777
3778 bool SpeculativeJIT::compileStrictEq(Node* node)
3779 {
3780     switch (node->binaryUseKind()) {
3781     case BooleanUse: {
3782         unsigned branchIndexInBlock = detectPeepHoleBranch();
3783         if (branchIndexInBlock != UINT_MAX) {
3784             Node* branchNode = m_block->at(branchIndexInBlock);
3785             compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
3786             use(node->child1());
3787             use(node->child2());
3788             m_indexInBlock = branchIndexInBlock;
3789             m_currentNode = branchNode;
3790             return true;
3791         }
3792         compileBooleanCompare(node, MacroAssembler::Equal);
3793         return false;
3794     }
3795
3796     case Int32Use: {
3797         unsigned branchIndexInBlock = detectPeepHoleBranch();