[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "JSCJSValueInlines.h"
39 #include "LinkBuffer.h"
40
41 namespace JSC { namespace DFG {
42
43 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
44     : m_compileOkay(true)
45     , m_jit(jit)
46     , m_currentNode(0)
47     , m_indexInBlock(0)
48     , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
49     , m_arguments(jit.codeBlock()->numParameters())
50     , m_variables(jit.graph().m_localVars)
51     , m_lastSetOperand(VirtualRegister())
52     , m_state(m_jit.graph())
53     , m_interpreter(m_jit.graph(), m_state)
54     , m_stream(&jit.jitCode()->variableEventStream)
55     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
56     , m_isCheckingArgumentTypes(false)
57 {
58 }
59
60 SpeculativeJIT::~SpeculativeJIT()
61 {
62 }
63
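// Inline-allocates a JSArray and its butterfly. The vector is sized to at least BASE_VECTOR_LEN;
// for double arrays the unused tail of the vector is filled with QNaN. If inline allocation
// fails, the slow path calls operationNewArrayWithSize instead.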
64 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
65 {
66     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
67     
68     GPRTemporary scratch(this);
69     GPRTemporary scratch2(this);
70     GPRReg scratchGPR = scratch.gpr();
71     GPRReg scratch2GPR = scratch2.gpr();
72     
73     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
74     
75     JITCompiler::JumpList slowCases;
76     
77     slowCases.append(
78         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
79     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
80     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
81     
82     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
83     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
84     
85     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
86 #if USE(JSVALUE64)
87         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
88         for (unsigned i = numElements; i < vectorLength; ++i)
89             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
90 #else
91         EncodedValueDescriptor value;
92         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
93         for (unsigned i = numElements; i < vectorLength; ++i) {
94             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
95             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
96         }
97 #endif
98     }
99     
100     // I want a slow path that also loads out the storage pointer, and that's
101     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
102     // of work for a very small piece of functionality. :-/
103     addSlowPathGenerator(adoptPtr(
104         new CallArrayAllocatorSlowPathGenerator(
105             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
106             structure, numElements)));
107 }
108
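// Speculation checks record an OSR exit tied to the current node and the current position in the
// variable event stream. The "backward" variants exit with the state as it was before the current
// node; the non-backward variants additionally convert the exit to a forward exit when
// m_speculationDirection is ForwardSpeculation.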
109 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
110 {
111     if (!m_compileOkay)
112         return;
113     ASSERT(m_isCheckingArgumentTypes || m_canExit);
114     m_jit.appendExitInfo(jumpToFail);
115     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
116 }
117
118 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
119 {
120     if (!m_compileOkay)
121         return;
122     ASSERT(m_isCheckingArgumentTypes || m_canExit);
123     m_jit.appendExitInfo(jumpsToFail);
124     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
125 }
126
127 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
128 {
129     if (!m_compileOkay)
130         return;
131     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
132     if (m_speculationDirection == ForwardSpeculation)
133         convertLastOSRExitToForward();
134 }
135
136 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
137 {
138     ASSERT(m_isCheckingArgumentTypes || m_canExit);
139     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
140 }
141
142 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
143 {
144     if (!m_compileOkay)
145         return OSRExitJumpPlaceholder();
146     ASSERT(m_isCheckingArgumentTypes || m_canExit);
147     unsigned index = m_jit.jitCode()->osrExit.size();
148     m_jit.appendExitInfo();
149     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
150     return OSRExitJumpPlaceholder(index);
151 }
152
153 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
154 {
155     ASSERT(m_isCheckingArgumentTypes || m_canExit);
156     return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
157 }
158
159 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
160 {
161     if (!m_compileOkay)
162         return;
163     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
164     if (m_speculationDirection == ForwardSpeculation)
165         convertLastOSRExitToForward();
166 }
167
168 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
169 {
170     ASSERT(m_isCheckingArgumentTypes || m_canExit);
171     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
172 }
173
174 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
175 {
176     if (!m_compileOkay)
177         return;
178     ASSERT(m_isCheckingArgumentTypes || m_canExit);
179     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
180     m_jit.appendExitInfo(jumpToFail);
181     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
182 }
183
184 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
185 {
186     ASSERT(m_isCheckingArgumentTypes || m_canExit);
187     backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
188 }
189
190 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
191 {
192     if (!m_compileOkay)
193         return;
194     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
195     if (m_speculationDirection == ForwardSpeculation)
196         convertLastOSRExitToForward();
197 }
198
199 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
200 {
201     speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
202 }
203
204 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
205 {
206     if (!m_compileOkay)
207         return 0;
208     ASSERT(m_isCheckingArgumentTypes || m_canExit);
209     m_jit.appendExitInfo(JITCompiler::JumpList());
210     OSRExit& exit = m_jit.jitCode()->osrExit[
211         m_jit.jitCode()->appendOSRExit(OSRExit(
212             kind, jsValueSource,
213             m_jit.graph().methodOfGettingAValueProfileFor(node),
214             this, m_stream->size()))];
215     exit.m_watchpointIndex = m_jit.jitCode()->appendWatchpoint(
216         JumpReplacementWatchpoint(m_jit.watchpointLabel()));
217     if (m_speculationDirection == ForwardSpeculation)
218         convertLastOSRExitToForward();
219     return &m_jit.jitCode()->watchpoints[exit.m_watchpointIndex];
220 }
221
222 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
223 {
224     return speculationWatchpoint(kind, JSValueSource(), 0);
225 }
226
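// Rewrites the most recently appended OSR exit so that it resumes after the current node rather
// than before it; valueRecovery, when provided, describes how to reconstruct the value that the
// current node was producing.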
227 void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
228 {
229     m_jit.jitCode()->lastOSRExit().convertToForward(
230         m_block, m_currentNode, m_indexInBlock, valueRecovery);
231 }
232
233 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
234 {
235     ASSERT(m_isCheckingArgumentTypes || m_canExit);
236     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
237     convertLastOSRExitToForward(valueRecovery);
238 }
239
240 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
241 {
242     ASSERT(m_isCheckingArgumentTypes || m_canExit);
243     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
244     convertLastOSRExitToForward(valueRecovery);
245 }
246
247 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
248 {
249     ASSERT(m_isCheckingArgumentTypes || m_canExit);
250 #if DFG_ENABLE(DEBUG_VERBOSE)
251     dataLogF("SpeculativeJIT was terminated.\n");
252 #endif
253     if (!m_compileOkay)
254         return;
255     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
256     m_compileOkay = false;
257 }
258
259 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
260 {
261     ASSERT(m_isCheckingArgumentTypes || m_canExit);
262     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
263 }
264
265 void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
266 {
267     ASSERT(needsTypeCheck(edge, typesPassedThrough));
268     m_interpreter.filter(edge, typesPassedThrough);
269     backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
270 }
271
272 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
273 {
274     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
275     if (m_speculationDirection == ForwardSpeculation)
276         convertLastOSRExitToForward();
277 }
278
279 void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
280 {
281     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
282     convertLastOSRExitToForward(valueRecovery);
283 }
284
285 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
286 {
287     m_slowPathGenerators.append(slowPathGenerator);
288 }
289
290 void SpeculativeJIT::runSlowPathGenerators()
291 {
292 #if DFG_ENABLE(DEBUG_VERBOSE)
293     dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
294 #endif
295     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
296         m_slowPathGenerators[i]->generate(this);
297 }
298
299 // On Windows we need to wrap fmod; on other platforms we can call it directly.
300 // On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
301 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
302 static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
303 {
304     return fmod(x, y);
305 }
306 #else
307 #define fmodAsDFGOperation fmod
308 #endif
309
310 void SpeculativeJIT::clearGenerationInfo()
311 {
312     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
313         m_generationInfo[i] = GenerationInfo();
314     m_gprs = RegisterBank<GPRInfo>();
315     m_fprs = RegisterBank<FPRInfo>();
316 }
317
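// Silent spill/fill plans describe how to save a live register across a call and restore it
// afterwards without touching the recorded GenerationInfo: the spill action stores the value only
// if it is not already on the stack, and the fill action rebuilds it from a constant, its spill
// slot, or its boxed form.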
318 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
319 {
320     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
321     Node* node = info.node();
322     DataFormat registerFormat = info.registerFormat();
323     ASSERT(registerFormat != DataFormatNone);
324     ASSERT(registerFormat != DataFormatDouble);
325         
326     SilentSpillAction spillAction;
327     SilentFillAction fillAction;
328         
329     if (!info.needsSpill())
330         spillAction = DoNothingForSpill;
331     else {
332 #if USE(JSVALUE64)
333         ASSERT(info.gpr() == source);
334         if (registerFormat == DataFormatInt32)
335             spillAction = Store32Payload;
336         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
337             spillAction = StorePtr;
338         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
339             spillAction = Store64;
340         else {
341             ASSERT(registerFormat & DataFormatJS);
342             spillAction = Store64;
343         }
344 #elif USE(JSVALUE32_64)
345         if (registerFormat & DataFormatJS) {
346             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
347             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
348         } else {
349             ASSERT(info.gpr() == source);
350             spillAction = Store32Payload;
351         }
352 #endif
353     }
354         
355     if (registerFormat == DataFormatInt32) {
356         ASSERT(info.gpr() == source);
357         ASSERT(isJSInt32(info.registerFormat()));
358         if (node->hasConstant()) {
359             ASSERT(isInt32Constant(node));
360             fillAction = SetInt32Constant;
361         } else
362             fillAction = Load32Payload;
363     } else if (registerFormat == DataFormatBoolean) {
364 #if USE(JSVALUE64)
365         RELEASE_ASSERT_NOT_REACHED();
366         fillAction = DoNothingForFill;
367 #elif USE(JSVALUE32_64)
368         ASSERT(info.gpr() == source);
369         if (node->hasConstant()) {
370             ASSERT(isBooleanConstant(node));
371             fillAction = SetBooleanConstant;
372         } else
373             fillAction = Load32Payload;
374 #endif
375     } else if (registerFormat == DataFormatCell) {
376         ASSERT(info.gpr() == source);
377         if (node->hasConstant()) {
378             JSValue value = valueOfJSConstant(node);
379             ASSERT_UNUSED(value, value.isCell());
380             fillAction = SetCellConstant;
381         } else {
382 #if USE(JSVALUE64)
383             fillAction = LoadPtr;
384 #else
385             fillAction = Load32Payload;
386 #endif
387         }
388     } else if (registerFormat == DataFormatStorage) {
389         ASSERT(info.gpr() == source);
390         fillAction = LoadPtr;
391     } else if (registerFormat == DataFormatInt52) {
392         if (node->hasConstant())
393             fillAction = SetInt52Constant;
394         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
395             fillAction = Load32PayloadConvertToInt52;
396         else if (info.spillFormat() == DataFormatInt52)
397             fillAction = Load64;
398         else if (info.spillFormat() == DataFormatStrictInt52)
399             fillAction = Load64ShiftInt52Left;
400         else if (info.spillFormat() == DataFormatNone)
401             fillAction = Load64;
402         else {
403             // Should never happen. Anything that qualifies as an int32 will never
404             // be turned into a cell (immediate spec fail) or a double (to-double
405             // conversions involve a separate node).
406             RELEASE_ASSERT_NOT_REACHED();
407             fillAction = Load64; // Make GCC happy.
408         }
409     } else if (registerFormat == DataFormatStrictInt52) {
410         if (node->hasConstant())
411             fillAction = SetStrictInt52Constant;
412         else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
413             fillAction = Load32PayloadSignExtend;
414         else if (info.spillFormat() == DataFormatInt52)
415             fillAction = Load64ShiftInt52Right;
416         else if (info.spillFormat() == DataFormatStrictInt52)
417             fillAction = Load64;
418         else if (info.spillFormat() == DataFormatNone)
419             fillAction = Load64;
420         else {
421             // Should never happen. Anything that qualifies as an int32 will never
422             // be turned into a cell (immediate spec fail) or a double (to-double
423             // conversions involve a separate node).
424             RELEASE_ASSERT_NOT_REACHED();
425             fillAction = Load64; // Make GCC happy.
426         }
427     } else {
428         ASSERT(registerFormat & DataFormatJS);
429 #if USE(JSVALUE64)
430         ASSERT(info.gpr() == source);
431         if (node->hasConstant()) {
432             if (valueOfJSConstant(node).isCell())
433                 fillAction = SetTrustedJSConstant;
                else
434                 fillAction = SetJSConstant;
435         } else if (info.spillFormat() == DataFormatInt32) {
436             ASSERT(registerFormat == DataFormatJSInt32);
437             fillAction = Load32PayloadBoxInt;
438         } else if (info.spillFormat() == DataFormatDouble) {
439             ASSERT(registerFormat == DataFormatJSDouble);
440             fillAction = LoadDoubleBoxDouble;
441         } else
442             fillAction = Load64;
443 #else
444         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
445         if (node->hasConstant())
446             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
447         else if (info.payloadGPR() == source)
448             fillAction = Load32Payload;
449         else { // Fill the Tag
450             switch (info.spillFormat()) {
451             case DataFormatInt32:
452                 ASSERT(registerFormat == DataFormatJSInt32);
453                 fillAction = SetInt32Tag;
454                 break;
455             case DataFormatCell:
456                 ASSERT(registerFormat == DataFormatJSCell);
457                 fillAction = SetCellTag;
458                 break;
459             case DataFormatBoolean:
460                 ASSERT(registerFormat == DataFormatJSBoolean);
461                 fillAction = SetBooleanTag;
462                 break;
463             default:
464                 fillAction = Load32Tag;
465                 break;
466             }
467         }
468 #endif
469     }
470         
471     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
472 }
473     
474 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
475 {
476     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
477     Node* node = info.node();
478     ASSERT(info.registerFormat() == DataFormatDouble);
479
480     SilentSpillAction spillAction;
481     SilentFillAction fillAction;
482         
483     if (!info.needsSpill())
484         spillAction = DoNothingForSpill;
485     else {
486         ASSERT(!node->hasConstant());
487         ASSERT(info.spillFormat() == DataFormatNone);
488         ASSERT(info.fpr() == source);
489         spillAction = StoreDouble;
490     }
491         
492 #if USE(JSVALUE64)
493     if (node->hasConstant()) {
494         ASSERT(isNumberConstant(node));
495         fillAction = SetDoubleConstant;
496     } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
497         // It was already spilled, and not as a double, which means we need to unbox it.
498         ASSERT(info.spillFormat() & DataFormatJS);
499         fillAction = LoadJSUnboxDouble;
500     } else
501         fillAction = LoadDouble;
502 #elif USE(JSVALUE32_64)
503     ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
504     if (node->hasConstant()) {
505         ASSERT(isNumberConstant(node));
506         fillAction = SetDoubleConstant;
507     } else
508         fillAction = LoadDouble;
509 #endif
510
511     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
512 }
513     
514 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
515 {
516     switch (plan.spillAction()) {
517     case DoNothingForSpill:
518         break;
519     case Store32Tag:
520         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
521         break;
522     case Store32Payload:
523         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
524         break;
525     case StorePtr:
526         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
527         break;
528 #if USE(JSVALUE64)
529     case Store64:
530         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
531         break;
532 #endif
533     case StoreDouble:
534         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
535         break;
536     default:
537         RELEASE_ASSERT_NOT_REACHED();
538     }
539 }
540     
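// canTrample is a scratch GPR that may be clobbered when rebuilding a value requires an
// intermediate register (materializing a double constant, unboxing a spilled JSValue); it is
// unused on 32-bit.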
541 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
542 {
543 #if USE(JSVALUE32_64)
544     UNUSED_PARAM(canTrample);
545 #endif
546     switch (plan.fillAction()) {
547     case DoNothingForFill:
548         break;
549     case SetInt32Constant:
550         m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
551         break;
552 #if USE(JSVALUE64)
553     case SetInt52Constant:
554         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
555         break;
556     case SetStrictInt52Constant:
557         m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
558         break;
559 #endif // USE(JSVALUE64)
560     case SetBooleanConstant:
561         m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
562         break;
563     case SetCellConstant:
564         m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
565         break;
566 #if USE(JSVALUE64)
567     case SetTrustedJSConstant:
568         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
569         break;
570     case SetJSConstant:
571         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
572         break;
573     case SetDoubleConstant:
574         m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
575         m_jit.move64ToDouble(canTrample, plan.fpr());
576         break;
577     case Load32PayloadBoxInt:
578         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
579         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
580         break;
581     case Load32PayloadConvertToInt52:
582         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
583         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
584         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
585         break;
586     case Load32PayloadSignExtend:
587         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
588         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
589         break;
590     case LoadDoubleBoxDouble:
591         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
592         m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
593         break;
594     case LoadJSUnboxDouble:
595         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
596         unboxDouble(canTrample, plan.fpr());
597         break;
598 #else
599     case SetJSConstantTag:
600         m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
601         break;
602     case SetJSConstantPayload:
603         m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
604         break;
605     case SetInt32Tag:
606         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
607         break;
608     case SetCellTag:
609         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
610         break;
611     case SetBooleanTag:
612         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
613         break;
614     case SetDoubleConstant:
615         m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
616         break;
617 #endif
618     case Load32Tag:
619         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
620         break;
621     case Load32Payload:
622         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
623         break;
624     case LoadPtr:
625         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
626         break;
627 #if USE(JSVALUE64)
628     case Load64:
629         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
630         break;
631     case Load64ShiftInt52Right:
632         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
633         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
634         break;
635     case Load64ShiftInt52Left:
636         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
637         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
638         break;
639 #endif
640     case LoadDouble:
641         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
642         break;
643     default:
644         RELEASE_ASSERT_NOT_REACHED();
645     }
646 }
647     
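// Expects the object's indexing type byte in tempGPR and emits the branch(es) taken when the
// indexing shape does not match what arrayMode requires. tempGPR is clobbered.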
648 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
649 {
650     switch (arrayMode.arrayClass()) {
651     case Array::OriginalArray: {
652         CRASH();
653         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
654         return result;
655     }
656         
657     case Array::Array:
658         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
659         return m_jit.branch32(
660             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
661         
662     default:
663         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
664         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
665     }
666 }
667
668 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
669 {
670     JITCompiler::JumpList result;
671     
672     switch (arrayMode.type()) {
673     case Array::Int32:
674         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
675
676     case Array::Double:
677         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
678
679     case Array::Contiguous:
680         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
681
682     case Array::ArrayStorage:
683     case Array::SlowPutArrayStorage: {
684         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
685         
686         if (arrayMode.isJSArray()) {
687             if (arrayMode.isSlowPut()) {
688                 result.append(
689                     m_jit.branchTest32(
690                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
691                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
692                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
693                 result.append(
694                     m_jit.branch32(
695                         MacroAssembler::Above, tempGPR,
696                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
697                 break;
698             }
699             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
700             result.append(
701                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
702             break;
703         }
704         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
705         if (arrayMode.isSlowPut()) {
706             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
707             result.append(
708                 m_jit.branch32(
709                     MacroAssembler::Above, tempGPR,
710                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
711             break;
712         }
713         result.append(
714             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
715         break;
716     }
717     default:
718         CRASH();
719         break;
720     }
721     
722     return result;
723 }
724
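// Emits the check that the base object really has the shape node->arrayMode() claims: indexed
// modes are checked via the structure's indexing type byte, while String, Arguments and typed
// arrays are checked by ClassInfo. A mismatch triggers an OSR exit.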
725 void SpeculativeJIT::checkArray(Node* node)
726 {
727     ASSERT(node->arrayMode().isSpecific());
728     ASSERT(!node->arrayMode().doesConversion());
729     
730     SpeculateCellOperand base(this, node->child1());
731     GPRReg baseReg = base.gpr();
732     
733     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
734         noResult(m_currentNode);
735         return;
736     }
737     
738     const ClassInfo* expectedClassInfo = 0;
739     
740     switch (node->arrayMode().type()) {
741     case Array::String:
742         expectedClassInfo = JSString::info();
743         break;
744     case Array::Int32:
745     case Array::Double:
746     case Array::Contiguous:
747     case Array::ArrayStorage:
748     case Array::SlowPutArrayStorage: {
749         GPRTemporary temp(this);
750         GPRReg tempGPR = temp.gpr();
751         m_jit.loadPtr(
752             MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
753         m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
754         speculationCheck(
755             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
756             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
757         
758         noResult(m_currentNode);
759         return;
760     }
761     case Array::Arguments:
762         expectedClassInfo = Arguments::info();
763         break;
764     default:
765         expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
766         break;
767     }
768     
769     RELEASE_ASSERT(expectedClassInfo);
770     
771     GPRTemporary temp(this);
772     m_jit.loadPtr(
773         MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
774     speculationCheck(
775         BadType, JSValueSource::unboxedCell(baseReg), node,
776         m_jit.branchPtr(
777             MacroAssembler::NotEqual,
778             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
779             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
780     
781     noResult(m_currentNode);
782 }
783
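// Emits the fast-path check that the base already has the desired structure (ArrayifyToStructure)
// or indexing shape, and defers the actual conversion to an ArrayifySlowPathGenerator.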
784 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
785 {
786     ASSERT(node->arrayMode().doesConversion());
787     
788     GPRTemporary temp(this);
789     GPRTemporary structure;
790     GPRReg tempGPR = temp.gpr();
791     GPRReg structureGPR = InvalidGPRReg;
792     
793     if (node->op() != ArrayifyToStructure) {
794         GPRTemporary realStructure(this);
795         structure.adopt(realStructure);
796         structureGPR = structure.gpr();
797     }
798         
799     // We can skip all that comes next if we already have array storage.
800     MacroAssembler::JumpList slowPath;
801     
802     if (node->op() == ArrayifyToStructure) {
803         slowPath.append(m_jit.branchWeakPtr(
804             JITCompiler::NotEqual,
805             JITCompiler::Address(baseReg, JSCell::structureOffset()),
806             node->structure()));
807     } else {
808         m_jit.loadPtr(
809             MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
810         
811         m_jit.load8(
812             MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
813         
814         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
815     }
816     
817     addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
818         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
819     
820     noResult(m_currentNode);
821 }
822
823 void SpeculativeJIT::arrayify(Node* node)
824 {
825     ASSERT(node->arrayMode().isSpecific());
826     
827     SpeculateCellOperand base(this, node->child1());
828     
829     if (!node->child2()) {
830         arrayify(node, base.gpr(), InvalidGPRReg);
831         return;
832     }
833     
834     SpeculateInt32Operand property(this, node->child2());
835     
836     arrayify(node, base.gpr(), property.gpr());
837 }
838
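// Fills a storage-pointer (butterfly) edge into a GPR: reuse the register it already occupies,
// reload it from its spill slot, or fall back to filling the edge as a cell.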
839 GPRReg SpeculativeJIT::fillStorage(Edge edge)
840 {
841     VirtualRegister virtualRegister = edge->virtualRegister();
842     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
843     
844     switch (info.registerFormat()) {
845     case DataFormatNone: {
846         if (info.spillFormat() == DataFormatStorage) {
847             GPRReg gpr = allocate();
848             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
849             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
850             info.fillStorage(*m_stream, gpr);
851             return gpr;
852         }
853         
854         // Must be a cell; fill it as a cell and then return the pointer.
855         return fillSpeculateCell(edge);
856     }
857         
858     case DataFormatStorage: {
859         GPRReg gpr = info.gpr();
860         m_gprs.lock(gpr);
861         return gpr;
862     }
863         
864     default:
865         return fillSpeculateCell(edge);
866     }
867 }
868
869 void SpeculativeJIT::useChildren(Node* node)
870 {
871     if (node->flags() & NodeHasVarArgs) {
872         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
873             if (!!m_jit.graph().m_varArgChildren[childIdx])
874                 use(m_jit.graph().m_varArgChildren[childIdx]);
875         }
876     } else {
877         Edge child1 = node->child1();
878         if (!child1) {
879             ASSERT(!node->child2() && !node->child3());
880             return;
881         }
882         use(child1);
883         
884         Edge child2 = node->child2();
885         if (!child2) {
886             ASSERT(!node->child3());
887             return;
888         }
889         use(child2);
890         
891         Edge child3 = node->child3();
892         if (!child3)
893             return;
894         use(child3);
895     }
896 }
897
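// These write barrier helpers are currently no-ops apart from optional profiling: each overload
// filters out values that could never need a barrier (non-cells, already-marked cells) and then,
// under WRITE_BARRIER_PROFILING, bumps the corresponding counter.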
898 void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
899 {
900     UNUSED_PARAM(jit);
901     UNUSED_PARAM(owner);
902     UNUSED_PARAM(scratch1);
903     UNUSED_PARAM(scratch2);
904     UNUSED_PARAM(useKind);
905     ASSERT(owner != scratch1);
906     ASSERT(owner != scratch2);
907     ASSERT(scratch1 != scratch2);
908
909 #if ENABLE(WRITE_BARRIER_PROFILING)
910     JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
911 #endif
912 }
913
914 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
915 {
916     UNUSED_PARAM(ownerGPR);
917     UNUSED_PARAM(valueGPR);
918     UNUSED_PARAM(scratch1);
919     UNUSED_PARAM(scratch2);
920     UNUSED_PARAM(useKind);
921
922     if (isKnownNotCell(valueUse.node()))
923         return;
924
925 #if ENABLE(WRITE_BARRIER_PROFILING)
926     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
927 #endif
928 }
929
930 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
931 {
932     UNUSED_PARAM(ownerGPR);
933     UNUSED_PARAM(value);
934     UNUSED_PARAM(scratch1);
935     UNUSED_PARAM(scratch2);
936     UNUSED_PARAM(useKind);
937     
938     if (Heap::isMarked(value))
939         return;
940
941 #if ENABLE(WRITE_BARRIER_PROFILING)
942     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
943 #endif
944 }
945
946 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
947 {
948     UNUSED_PARAM(owner);
949     UNUSED_PARAM(valueGPR);
950     UNUSED_PARAM(scratch);
951     UNUSED_PARAM(useKind);
952
953     if (isKnownNotCell(valueUse.node()))
954         return;
955
956 #if ENABLE(WRITE_BARRIER_PROFILING)
957     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
958 #endif
959 }
960
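// Compiles the 'in' operator. When the property is a constant identifier string we emit a
// patchable jump and register an InRecord so the check can later be patched into a fast path,
// with operationInOptimize as the slow path; otherwise we call operationGenericIn.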
961 void SpeculativeJIT::compileIn(Node* node)
962 {
963     SpeculateCellOperand base(this, node->child2());
964     GPRReg baseGPR = base.gpr();
965         
966     if (isConstant(node->child1().node())) {
967         JSString* string =
968             jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
969         if (string && string->tryGetValueImpl()
970             && string->tryGetValueImpl()->isIdentifier()) {
971             GPRTemporary result(this);
972             GPRReg resultGPR = result.gpr();
973
974             use(node->child1());
975                 
976             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
977             
978             OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
979                 jump.m_jump, this, operationInOptimize,
980                 JSValueRegs::payloadOnly(resultGPR), baseGPR,
981                 string->tryGetValueImpl());
982                 
983             m_jit.addIn(InRecord(
984                 node->codeOrigin, jump, slowPath.get(), safeCast<int8_t>(baseGPR),
985                 safeCast<int8_t>(resultGPR), usedRegisters()));
986             addSlowPathGenerator(slowPath.release());
987                 
988             base.use();
989                 
990 #if USE(JSVALUE64)
991             jsValueResult(
992                 resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
993 #else
994             booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
995 #endif
996             return;
997         }
998     }
999         
1000     JSValueOperand key(this, node->child1());
1001     JSValueRegs regs = key.jsValueRegs();
1002         
1003     GPRResult result(this);
1004     GPRReg resultGPR = result.gpr();
1005         
1006     base.use();
1007     key.use();
1008         
1009     flushRegisters();
1010     callOperation(
1011         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1012         baseGPR, regs);
1013 #if USE(JSVALUE64)
1014     jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
1015 #else
1016     booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1017 #endif
1018 }
1019
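// Tries to fuse this compare with an immediately following Branch node (peephole). Returns true
// if it did, in which case the branch node has already been consumed.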
1020 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
1021 {
1022     unsigned branchIndexInBlock = detectPeepHoleBranch();
1023     if (branchIndexInBlock != UINT_MAX) {
1024         Node* branchNode = m_block->at(branchIndexInBlock);
1025
1026         ASSERT(node->adjustedRefCount() == 1);
1027         
1028         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1029     
1030         m_indexInBlock = branchIndexInBlock;
1031         m_currentNode = branchNode;
1032         
1033         return true;
1034     }
1035     
1036     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1037     
1038     return false;
1039 }
1040
1041 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1042 {
1043     unsigned branchIndexInBlock = detectPeepHoleBranch();
1044     if (branchIndexInBlock != UINT_MAX) {
1045         Node* branchNode = m_block->at(branchIndexInBlock);
1046
1047         ASSERT(node->adjustedRefCount() == 1);
1048         
1049         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1050     
1051         m_indexInBlock = branchIndexInBlock;
1052         m_currentNode = branchNode;
1053         
1054         return true;
1055     }
1056     
1057     nonSpeculativeNonPeepholeStrictEq(node, invert);
1058     
1059     return false;
1060 }
1061
1062 #ifndef NDEBUG
1063 static const char* dataFormatString(DataFormat format)
1064 {
1065     // These values correspond to the DataFormat enum.
1066     const char* strings[] = {
1067         "[  ]",
1068         "[ i]",
1069         "[ d]",
1070         "[ c]",
1071         "Err!",
1072         "Err!",
1073         "Err!",
1074         "Err!",
1075         "[J ]",
1076         "[Ji]",
1077         "[Jd]",
1078         "[Jc]",
1079         "Err!",
1080         "Err!",
1081         "Err!",
1082         "Err!",
1083     };
1084     return strings[format];
1085 }
1086
1087 void SpeculativeJIT::dump(const char* label)
1088 {
1089     if (label)
1090         dataLogF("<%s>\n", label);
1091
1092     dataLogF("  gprs:\n");
1093     m_gprs.dump();
1094     dataLogF("  fprs:\n");
1095     m_fprs.dump();
1096     dataLogF("  VirtualRegisters:\n");
1097     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1098         GenerationInfo& info = m_generationInfo[i];
1099         if (info.alive())
1100             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1101         else
1102             dataLogF("    % 3d:[__][__]", i);
1103         if (info.registerFormat() == DataFormatDouble)
1104             dataLogF(":fpr%d\n", info.fpr());
1105         else if (info.registerFormat() != DataFormatNone
1106 #if USE(JSVALUE32_64)
1107             && !(info.registerFormat() & DataFormatJS)
1108 #endif
1109             ) {
1110             ASSERT(info.gpr() != InvalidGPRReg);
1111             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1112         } else
1113             dataLogF("\n");
1114     }
1115     if (label)
1116         dataLogF("</%s>\n", label);
1117 }
1118 #endif
1119
1120
1121 #if DFG_ENABLE(CONSISTENCY_CHECK)
1122 void SpeculativeJIT::checkConsistency()
1123 {
1124     bool failed = false;
1125
1126     for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1127         if (iter.isLocked()) {
1128             dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
1129             failed = true;
1130         }
1131     }
1132     for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1133         if (iter.isLocked()) {
1134             dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
1135             failed = true;
1136         }
1137     }
1138
1139     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1140         VirtualRegister virtualRegister = (VirtualRegister)i;
1141         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1142         if (!info.alive())
1143             continue;
1144         switch (info.registerFormat()) {
1145         case DataFormatNone:
1146             break;
1147         case DataFormatJS:
1148         case DataFormatJSInt32:
1149         case DataFormatJSDouble:
1150         case DataFormatJSCell:
1151         case DataFormatJSBoolean:
1152 #if USE(JSVALUE32_64)
1153             break;
1154 #endif
1155         case DataFormatInt32:
1156         case DataFormatCell:
1157         case DataFormatBoolean:
1158         case DataFormatStorage: {
1159             GPRReg gpr = info.gpr();
1160             ASSERT(gpr != InvalidGPRReg);
1161             if (m_gprs.name(gpr) != virtualRegister) {
1162                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
1163                 failed = true;
1164             }
1165             break;
1166         }
1167         case DataFormatDouble: {
1168             FPRReg fpr = info.fpr();
1169             ASSERT(fpr != InvalidFPRReg);
1170             if (m_fprs.name(fpr) != virtualRegister) {
1171                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
1172                 failed = true;
1173             }
1174             break;
1175         }
1176         case DataFormatOSRMarker:
1177         case DataFormatDead:
1178         case DataFormatArguments:
1179             RELEASE_ASSERT_NOT_REACHED();
1180             break;
1181         }
1182     }
1183
1184     for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1185         VirtualRegister virtualRegister = iter.name();
1186         if (!virtualRegister.isValid())
1187             continue;
1188
1189         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1190 #if USE(JSVALUE64)
1191         if (iter.regID() != info.gpr()) {
1192             dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1193             failed = true;
1194         }
1195 #else
1196         if (!(info.registerFormat() & DataFormatJS)) {
1197             if (iter.regID() != info.gpr()) {
1198                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1199                 failed = true;
1200             }
1201         } else {
1202             if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
1203                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1204                 failed = true;
1205             }
1206         }
1207 #endif
1208     }
1209
1210     for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1211         VirtualRegister virtualRegister = iter.name();
1212         if (!virtualRegister.isValid())
1213             continue;
1214
1215         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1216         if (iter.regID() != info.fpr()) {
1217             dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1218             failed = true;
1219         }
1220     }
1221
1222     if (failed) {
1223         dump();
1224         CRASH();
1225     }
1226 }
1227 #endif
1228
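// GPRTemporary / FPRTemporary hand out scratch registers for the duration of one node's code
// generation. Where an operand's register can safely be reused (canReuse), the temporary takes it
// over instead of allocating a fresh one.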
1229 GPRTemporary::GPRTemporary()
1230     : m_jit(0)
1231     , m_gpr(InvalidGPRReg)
1232 {
1233 }
1234
1235 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1236     : m_jit(jit)
1237     , m_gpr(InvalidGPRReg)
1238 {
1239     m_gpr = m_jit->allocate();
1240 }
1241
1242 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1243     : m_jit(jit)
1244     , m_gpr(InvalidGPRReg)
1245 {
1246     m_gpr = m_jit->allocate(specific);
1247 }
1248
1249 #if USE(JSVALUE32_64)
1250 GPRTemporary::GPRTemporary(
1251     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1252     : m_jit(jit)
1253     , m_gpr(InvalidGPRReg)
1254 {
1255     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1256         m_gpr = m_jit->reuse(op1.gpr(which));
1257     else
1258         m_gpr = m_jit->allocate();
1259 }
1260 #endif // USE(JSVALUE32_64)
1261
1262 void GPRTemporary::adopt(GPRTemporary& other)
1263 {
1264     ASSERT(!m_jit);
1265     ASSERT(m_gpr == InvalidGPRReg);
1266     ASSERT(other.m_jit);
1267     ASSERT(other.m_gpr != InvalidGPRReg);
1268     m_jit = other.m_jit;
1269     m_gpr = other.m_gpr;
1270     other.m_jit = 0;
1271     other.m_gpr = InvalidGPRReg;
1272 }
1273
1274 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1275     : m_jit(jit)
1276     , m_fpr(InvalidFPRReg)
1277 {
1278     m_fpr = m_jit->fprAllocate();
1279 }
1280
1281 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1282     : m_jit(jit)
1283     , m_fpr(InvalidFPRReg)
1284 {
1285     if (m_jit->canReuse(op1.node()))
1286         m_fpr = m_jit->reuse(op1.fpr());
1287     else
1288         m_fpr = m_jit->fprAllocate();
1289 }
1290
1291 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1292     : m_jit(jit)
1293     , m_fpr(InvalidFPRReg)
1294 {
1295     if (m_jit->canReuse(op1.node()))
1296         m_fpr = m_jit->reuse(op1.fpr());
1297     else if (m_jit->canReuse(op2.node()))
1298         m_fpr = m_jit->reuse(op2.fpr());
1299     else
1300         m_fpr = m_jit->fprAllocate();
1301 }
1302
1303 #if USE(JSVALUE32_64)
1304 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1305     : m_jit(jit)
1306     , m_fpr(InvalidFPRReg)
1307 {
1308     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1309         m_fpr = m_jit->reuse(op1.fpr());
1310     else
1311         m_fpr = m_jit->fprAllocate();
1312 }
1313 #endif
1314
1315 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1316 {
1317     BasicBlock* taken = branchNode->takenBlock();
1318     BasicBlock* notTaken = branchNode->notTakenBlock();
1319     
1320     SpeculateDoubleOperand op1(this, node->child1());
1321     SpeculateDoubleOperand op2(this, node->child2());
1322     
1323     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1324     jump(notTaken);
1325 }
1326
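// Peephole-fused object equality: speculate that both operands are non-string objects (and, when
// the masquerades-as-undefined watchpoint is no longer valid, that neither structure has the
// MasqueradesAsUndefined flag set), then compare the cell pointers directly.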
1327 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1328 {
1329     BasicBlock* taken = branchNode->takenBlock();
1330     BasicBlock* notTaken = branchNode->notTakenBlock();
1331
1332     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1333     
1334     if (taken == nextBlock()) {
1335         condition = MacroAssembler::NotEqual;
1336         BasicBlock* tmp = taken;
1337         taken = notTaken;
1338         notTaken = tmp;
1339     }
1340
1341     SpeculateCellOperand op1(this, node->child1());
1342     SpeculateCellOperand op2(this, node->child2());
1343     
1344     GPRReg op1GPR = op1.gpr();
1345     GPRReg op2GPR = op2.gpr();
1346     
1347     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1348         speculationWatchpointForMasqueradesAsUndefined();
1349
1350         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1351             speculationCheck(
1352                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
1353                 m_jit.branchPtr(
1354                     MacroAssembler::Equal, 
1355                     MacroAssembler::Address(op1GPR, JSCell::structureOffset()), 
1356                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1357         }
1358         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1359             speculationCheck(
1360                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1361                 m_jit.branchPtr(
1362                     MacroAssembler::Equal, 
1363                     MacroAssembler::Address(op2GPR, JSCell::structureOffset()), 
1364                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1365         }
1366     } else {
1367         GPRTemporary structure(this);
1368         GPRReg structureGPR = structure.gpr();
1369
1370         m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1371         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1372             speculationCheck(
1373                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1374                 m_jit.branchPtr(
1375                     MacroAssembler::Equal, 
1376                     structureGPR, 
1377                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1378         }
1379         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1380             m_jit.branchTest8(
1381                 MacroAssembler::NonZero, 
1382                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1383                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1384
1385         m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1386         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1387             speculationCheck(
1388                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1389                 m_jit.branchPtr(
1390                     MacroAssembler::Equal, 
1391                     structureGPR, 
1392                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1393         }
1394         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1395             m_jit.branchTest8(
1396                 MacroAssembler::NonZero, 
1397                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1398                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1399     }
1400
1401     branchPtr(condition, op1GPR, op2GPR, taken);
1402     jump(notTaken);
1403 }
1404
1405 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1406 {
1407     BasicBlock* taken = branchNode->takenBlock();
1408     BasicBlock* notTaken = branchNode->notTakenBlock();
1409
1410     // The branch instruction will branch to the taken block.
1411     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1412     if (taken == nextBlock()) {
1413         condition = JITCompiler::invert(condition);
1414         BasicBlock* tmp = taken;
1415         taken = notTaken;
1416         notTaken = tmp;
1417     }
1418
1419     if (isBooleanConstant(node->child1().node())) {
1420         bool imm = valueOfBooleanConstant(node->child1().node());
1421         SpeculateBooleanOperand op2(this, node->child2());
1422         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1423     } else if (isBooleanConstant(node->child2().node())) {
1424         SpeculateBooleanOperand op1(this, node->child1());
1425         bool imm = valueOfBooleanConstant(node->child2().node());
1426         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1427     } else {
1428         SpeculateBooleanOperand op1(this, node->child1());
1429         SpeculateBooleanOperand op2(this, node->child2());
1430         branch32(condition, op1.gpr(), op2.gpr(), taken);
1431     }
1432
1433     jump(notTaken);
1434 }
1435
1436 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1437 {
1438     BasicBlock* taken = branchNode->takenBlock();
1439     BasicBlock* notTaken = branchNode->notTakenBlock();
1440
1441     // The branch instruction will branch to the taken block.
1442     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1443     if (taken == nextBlock()) {
1444         condition = JITCompiler::invert(condition);
1445         BasicBlock* tmp = taken;
1446         taken = notTaken;
1447         notTaken = tmp;
1448     }
1449
1450     if (isInt32Constant(node->child1().node())) {
1451         int32_t imm = valueOfInt32Constant(node->child1().node());
1452         SpeculateInt32Operand op2(this, node->child2());
1453         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1454     } else if (isInt32Constant(node->child2().node())) {
1455         SpeculateInt32Operand op1(this, node->child1());
1456         int32_t imm = valueOfInt32Constant(node->child2().node());
1457         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1458     } else {
1459         SpeculateInt32Operand op1(this, node->child1());
1460         SpeculateInt32Operand op2(this, node->child2());
1461         branch32(condition, op1.gpr(), op2.gpr(), taken);
1462     }
1463
1464     jump(notTaken);
1465 }
1466
1467 // Returns true if the compare is fused with a subsequent branch.
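     // The fused branch is chosen based on the compare's use kinds: integer, Int52, double,
     // boolean and object comparisons each have a specialized peephole; string comparisons
     // fall back to the non-peephole path, and anything else goes through the non-speculative
     // peephole branch.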
1468 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
1469 {
1470     // Fused compare & branch.
1471     unsigned branchIndexInBlock = detectPeepHoleBranch();
1472     if (branchIndexInBlock != UINT_MAX) {
1473         Node* branchNode = m_block->at(branchIndexInBlock);
1474
1475         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1476         // so there can be no intervening nodes that also reference the compare.
1477         ASSERT(node->adjustedRefCount() == 1);
1478
1479         if (node->isBinaryUseKind(Int32Use))
1480             compilePeepHoleInt32Branch(node, branchNode, condition);
1481 #if USE(JSVALUE64)
1482         else if (node->isBinaryUseKind(MachineIntUse))
1483             compilePeepHoleInt52Branch(node, branchNode, condition);
1484 #endif // USE(JSVALUE64)
1485         else if (node->isBinaryUseKind(NumberUse))
1486             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1487         else if (node->op() == CompareEq) {
1488             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1489                 // Use non-peephole comparison, for now.
1490                 return false;
1491             }
1492             if (node->isBinaryUseKind(BooleanUse))
1493                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1494             else if (node->isBinaryUseKind(ObjectUse))
1495                 compilePeepHoleObjectEquality(node, branchNode);
1496             else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
1497                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1498             else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
1499                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1500             else {
1501                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1502                 return true;
1503             }
1504         } else {
1505             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1506             return true;
1507         }
1508
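             // Consume the compare's operands here, then fast-forward to the branch node so the
             // outer loop does not compile the branch a second time.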
1509         use(node->child1());
1510         use(node->child2());
1511         m_indexInBlock = branchIndexInBlock;
1512         m_currentNode = branchNode;
1513         return true;
1514     }
1515     return false;
1516 }
1517
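     // Record in the variable event stream that this node's value has come into existence, so
     // that OSR exit can take it into account when reconstructing the bytecode state.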
1518 void SpeculativeJIT::noticeOSRBirth(Node* node)
1519 {
1520     if (!node->hasVirtualRegister())
1521         return;
1522     
1523     VirtualRegister virtualRegister = node->virtualRegister();
1524     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1525     
1526     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1527 }
1528
1529 void SpeculativeJIT::compileMovHint(Node* node)
1530 {
1531     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1532     
1533     m_lastSetOperand = node->local();
1534
1535     Node* child = node->child1().node();
1536     noticeOSRBirth(child);
1537     
1538     if (child->op() == UInt32ToNumber)
1539         noticeOSRBirth(child->child1().node());
1540     
1541     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local().offset()));
1542 }
1543
1544 void SpeculativeJIT::compileMovHintAndCheck(Node* node)
1545 {
1546     compileMovHint(node);
1547     speculate(node, node->child1());
1548     noResult(node);
1549 }
1550
1551 void SpeculativeJIT::compileInlineStart(Node* node)
1552 {
1553     InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
1554     int argumentCountIncludingThis = inlineCallFrame->arguments.size();
1555     unsigned argumentPositionStart = node->argumentPositionStart();
1556     CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
1557     for (int i = 0; i < argumentCountIncludingThis; ++i) {
1558         ValueRecovery recovery;
1559         if (codeBlock->isCaptured(virtualRegisterForArgument(i)))
1560             recovery = ValueRecovery::alreadyInJSStack();
1561         else {
1562             ArgumentPosition& argumentPosition =
1563                 m_jit.graph().m_argumentPositions[argumentPositionStart + i];
1564             ValueSource valueSource;
1565             switch (argumentPosition.flushFormat()) {
1566             case DeadFlush:
1567             case FlushedJSValue:
1568                 valueSource = ValueSource(ValueInJSStack);
1569                 break;
1570             case FlushedDouble:
1571                 valueSource = ValueSource(DoubleInJSStack);
1572                 break;
1573             case FlushedInt32:
1574                 valueSource = ValueSource(Int32InJSStack);
1575                 break;
1576             case FlushedInt52:
1577                 valueSource = ValueSource(Int52InJSStack);
1578                 break;
1579             case FlushedCell:
1580                 valueSource = ValueSource(CellInJSStack);
1581                 break;
1582             case FlushedBoolean:
1583                 valueSource = ValueSource(BooleanInJSStack);
1584                 break;
1585             }
1586             recovery = computeValueRecoveryFor(valueSource);
1587         }
1588         // The recovery should refer either to something that has already been
1589         // stored into the stack at the right place, or to a constant,
1590         // since the Arguments code isn't smart enough to handle anything else.
1591         // The exception is the this argument, which we don't really need to be
1592         // able to recover.
1593 #if DFG_ENABLE(DEBUG_VERBOSE)
1594         dataLogF("\nRecovery for argument %d: ", i);
1595         recovery.dump(WTF::dataFile());
1596 #endif
1597         inlineCallFrame->arguments[i] = recovery;
1598     }
1599 }
1600
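     // Give up on code generation for the rest of the current block: plant a breakpoint so that
     // any stray entry into the half-generated code traps, clear the register allocation state,
     // and reset m_compileOkay so that subsequent blocks can still be compiled.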
1601 void SpeculativeJIT::bail()
1602 {
1603     m_compileOkay = true;
1604     m_jit.breakpoint();
1605     clearGenerationInfo();
1606 }
1607
1608 void SpeculativeJIT::compileCurrentBlock()
1609 {
1610     ASSERT(m_compileOkay);
1611     
1612     if (!m_block)
1613         return;
1614     
1615     ASSERT(m_block->isReachable);
1616     
1617     if (!m_block->cfaHasVisited) {
1618         // Don't generate code for basic blocks that are unreachable according to CFA.
1619         // But to be sure that nobody has generated a jump to this block, drop in a
1620         // breakpoint here.
1621         m_jit.breakpoint();
1622         return;
1623     }
1624
1625     m_jit.blockHeads()[m_block->index] = m_jit.label();
1626 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
1627     m_jit.breakpoint();
1628 #endif
1629     
1630 #if DFG_ENABLE(DEBUG_VERBOSE)
1631     dataLog("Setting up state for block ", *m_block, ": ");
1632 #endif
1633     
1634     m_stream->appendAndLog(VariableEvent::reset());
1635     
1636     m_jit.jitAssertHasValidCallFrame();
1637
1638     ASSERT(m_arguments.size() == m_block->variablesAtHead.numberOfArguments());
1639     for (size_t i = 0; i < m_arguments.size(); ++i) {
1640         ValueSource valueSource = ValueSource(ValueInJSStack);
1641         m_arguments[i] = valueSource;
1642         m_stream->appendAndLog(VariableEvent::setLocal(virtualRegisterForArgument(i), valueSource.dataFormat()));
1643     }
1644     
1645     m_state.reset();
1646     m_state.beginBasicBlock(m_block);
1647     
1648     ASSERT(m_variables.size() == m_block->variablesAtHead.numberOfLocals());
1649     for (size_t i = 0; i < m_variables.size(); ++i) {
1650         Node* node = m_block->variablesAtHead.local(i);
1651         ValueSource valueSource;
1652         if (!node)
1653             valueSource = ValueSource(SourceIsDead);
1654         else if (node->variableAccessData()->isArgumentsAlias())
1655             valueSource = ValueSource(ArgumentsSource);
1656         else if (!node->refCount())
1657             valueSource = ValueSource(SourceIsDead);
1658         else
1659             valueSource = ValueSource::forFlushFormat(node->variableAccessData()->flushFormat());
1660         m_variables[i] = valueSource;
1661         // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
1662         m_stream->appendAndLog(VariableEvent::setLocal(virtualRegisterForLocal(i), valueSource.dataFormat()));
1663     }
1664     
1665     m_lastSetOperand = VirtualRegister();
1666     m_codeOriginForExitTarget = CodeOrigin();
1667     m_codeOriginForExitProfile = CodeOrigin();
1668     
1669 #if DFG_ENABLE(DEBUG_VERBOSE)
1670     dataLogF("\n");
1671 #endif
1672
1673     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1674         m_currentNode = m_block->at(m_indexInBlock);
1675         
1676         // We may have hit a contradiction that the CFA was aware of but that the JIT
1677         // didn't cause directly.
1678         if (!m_state.isValid()) {
1679             bail();
1680             return;
1681         }
1682         
1683         m_canExit = m_currentNode->canExit();
1684         bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
1685         m_jit.setForNode(m_currentNode);
1686         m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
1687         m_codeOriginForExitProfile = m_currentNode->codeOrigin;
1688         if (!m_currentNode->shouldGenerate()) {
1689 #if DFG_ENABLE(DEBUG_VERBOSE)
1690             dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x     ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1691 #endif
1692             switch (m_currentNode->op()) {
1693             case JSConstant:
1694                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1695                 break;
1696                 
1697             case WeakJSConstant:
1698                 m_jit.addWeakReference(m_currentNode->weakConstant());
1699                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1700                 break;
1701                 
1702             case SetLocal:
1703                 RELEASE_ASSERT_NOT_REACHED();
1704                 break;
1705                 
1706             case MovHint:
1707                 compileMovHint(m_currentNode);
1708                 break;
1709                 
1710             case ZombieHint: {
1711                 m_lastSetOperand = m_currentNode->local();
1712                 m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
1713                 break;
1714             }
1715
1716             default:
1717                 if (belongsInMinifiedGraph(m_currentNode->op()))
1718                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1719                 break;
1720             }
1721         } else {
1722             
1723             if (verboseCompilationEnabled()) {
1724                 dataLogF(
1725                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1726                     (int)m_currentNode->index(),
1727                     m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1728 #if DFG_ENABLE(DEBUG_VERBOSE)
1729                 dataLog("   ");
1730 #else
1731                 dataLog("\n");
1732 #endif
1733             }
1734 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
1735             m_jit.breakpoint();
1736 #endif
1737 #if DFG_ENABLE(XOR_DEBUG_AID)
1738             m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1739             m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1740 #endif
1741             checkConsistency();
1742             
1743             m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
1744             
1745             compile(m_currentNode);
1746             if (!m_compileOkay) {
1747                 bail();
1748                 return;
1749             }
1750             
1751             if (belongsInMinifiedGraph(m_currentNode->op())) {
1752                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1753                 noticeOSRBirth(m_currentNode);
1754             }
1755             
1756 #if DFG_ENABLE(DEBUG_VERBOSE)
1757             if (m_currentNode->hasResult()) {
1758                 GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
1759                 dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
1760                 if (info.registerFormat() != DataFormatNone) {
1761                     if (info.registerFormat() == DataFormatDouble)
1762                         dataLogF(", %s", FPRInfo::debugName(info.fpr()));
1763 #if USE(JSVALUE32_64)
1764                     else if (info.registerFormat() & DataFormatJS)
1765                         dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
1766 #endif
1767                     else
1768                         dataLogF(", %s", GPRInfo::debugName(info.gpr()));
1769                 }
1770                 dataLogF("    ");
1771             } else
1772                 dataLogF("    ");
1773 #endif
1774         }
1775         
1776 #if DFG_ENABLE(DEBUG_VERBOSE)
1777         dataLogF("\n");
1778 #endif
1779         
1780         // Make sure that the abstract state is rematerialized for the next node.
1781         if (shouldExecuteEffects)
1782             m_interpreter.executeEffects(m_indexInBlock);
1783         
1784         if (m_currentNode->shouldGenerate())
1785             checkConsistency();
1786     }
1787     
1788     // Perform the most basic verification that children have been used correctly.
1789 #if !ASSERT_DISABLED
1790     for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1791         GenerationInfo& info = m_generationInfo[index];
1792         ASSERT(!info.alive());
1793     }
1794 #endif
1795 }
1796
1797 // If we are making type predictions about our arguments then
1798 // we need to check that they are correct on function entry.
1799 void SpeculativeJIT::checkArgumentTypes()
1800 {
1801     ASSERT(!m_currentNode);
1802     m_isCheckingArgumentTypes = true;
1803     m_speculationDirection = BackwardSpeculation;
1804     m_codeOriginForExitTarget = CodeOrigin(0);
1805     m_codeOriginForExitProfile = CodeOrigin(0);
1806
1807     for (size_t i = 0; i < m_arguments.size(); ++i)
1808         m_arguments[i] = ValueSource(ValueInJSStack);
1809     for (size_t i = 0; i < m_variables.size(); ++i)
1810         m_variables[i] = ValueSource(ValueInJSStack);
1811     
1812     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1813         Node* node = m_jit.graph().m_arguments[i];
1814         ASSERT(node->op() == SetArgument);
1815         if (!node->shouldGenerate()) {
1816             // The argument is dead. We don't do any checks for such arguments.
1817             continue;
1818         }
1819         
1820         VariableAccessData* variableAccessData = node->variableAccessData();
1821         FlushFormat format = variableAccessData->flushFormat();
1822         
1823         if (format == FlushedJSValue)
1824             continue;
1825         
1826         VirtualRegister virtualRegister = variableAccessData->local();
1827
1828         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1829         
1830 #if USE(JSVALUE64)
1831         switch (format) {
1832         case FlushedInt32: {
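                 // On 64-bit, int32s are boxed by OR-ing in the tag-type-number bits, so anything
                 // numerically below tagTypeNumberRegister cannot be an int32.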
1833             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1834             break;
1835         }
1836         case FlushedBoolean: {
1837             GPRTemporary temp(this);
1838             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1839             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1840             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1841             break;
1842         }
1843         case FlushedCell: {
1844             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1845             break;
1846         }
1847         default:
1848             RELEASE_ASSERT_NOT_REACHED();
1849             break;
1850         }
1851 #else
1852         switch (format) {
1853         case FlushedInt32: {
1854             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1855             break;
1856         }
1857         case FlushedBoolean: {
1858             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1859             break;
1860         }
1861         case FlushedCell: {
1862             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1863             break;
1864         }
1865         default:
1866             RELEASE_ASSERT_NOT_REACHED();
1867             break;
1868         }
1869 #endif
1870     }
1871     m_isCheckingArgumentTypes = false;
1872 }
1873
1874 bool SpeculativeJIT::compile()
1875 {
1876     checkArgumentTypes();
1877
1878     ASSERT(!m_currentNode);
1879     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1880         m_jit.setForBlockIndex(blockIndex);
1881         m_block = m_jit.graph().block(blockIndex);
1882         compileCurrentBlock();
1883     }
1884     linkBranches();
1885     return true;
1886 }
1887
1888 void SpeculativeJIT::createOSREntries()
1889 {
1890     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1891         BasicBlock* block = m_jit.graph().block(blockIndex);
1892         if (!block)
1893             continue;
1894         if (!block->isOSRTarget)
1895             continue;
1896
1897         // Currently we don't have OSR entry trampolines. We could add them
1898         // here if need be.
1899         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1900     }
1901 }
1902
1903 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1904 {
1905     unsigned osrEntryIndex = 0;
1906     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1907         BasicBlock* block = m_jit.graph().block(blockIndex);
1908         if (!block)
1909             continue;
1910         if (!block->isOSRTarget)
1911             continue;
1912         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1913     }
1914     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1915 }
1916
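     // Values already flushed to the JSStack can be recovered from there directly. Otherwise we
     // can only recover constants; anything else yields an empty ValueRecovery.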
1917 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
1918 {
1919     if (valueSource.isInJSStack())
1920         return valueSource.valueRecovery();
1921         
1922     ASSERT(valueSource.kind() == HaveNode);
1923     Node* node = valueSource.id().node(m_jit.graph());
1924     if (isConstant(node))
1925         return ValueRecovery::constant(valueOfJSConstant(node));
1926     
1927     return ValueRecovery();
1928 }
1929
1930 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1931 {
1932     Edge child3 = m_jit.graph().varArgChild(node, 2);
1933     Edge child4 = m_jit.graph().varArgChild(node, 3);
1934
1935     ArrayMode arrayMode = node->arrayMode();
1936     
1937     GPRReg baseReg = base.gpr();
1938     GPRReg propertyReg = property.gpr();
1939     
1940     SpeculateDoubleOperand value(this, child3);
1941
1942     FPRReg valueReg = value.fpr();
1943     
1944     DFG_TYPE_CHECK(
1945         JSValueRegs(), child3, SpecFullRealNumber,
1946         m_jit.branchDouble(
1947             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1948     
1949     if (!m_compileOkay)
1950         return;
1951     
1952     StorageOperand storage(this, child4);
1953     GPRReg storageReg = storage.gpr();
1954
1955     if (node->op() == PutByValAlias) {
1956         // Store the value to the array.
1957         GPRReg propertyReg = property.gpr();
1958         FPRReg valueReg = value.fpr();
1959         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1960         
1961         noResult(m_currentNode);
1962         return;
1963     }
1964     
1965     GPRTemporary temporary;
1966     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1967
1968     MacroAssembler::Jump slowCase;
1969     
1970     if (arrayMode.isInBounds()) {
1971         speculationCheck(
1972             StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
1973             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1974     } else {
1975         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1976         
1977         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1978         
1979         if (!arrayMode.isOutOfBounds())
1980             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1981         
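             // The store is beyond the public length but within the vector, so grow the public
             // length to cover the newly written index.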
1982         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1983         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1984         
1985         inBounds.link(&m_jit);
1986     }
1987     
1988     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1989
1990     base.use();
1991     property.use();
1992     value.use();
1993     storage.use();
1994     
1995     if (arrayMode.isOutOfBounds()) {
1996         addSlowPathGenerator(
1997             slowPathCall(
1998                 slowCase, this,
1999                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2000                 NoResult, baseReg, propertyReg, valueReg));
2001     }
2002
2003     noResult(m_currentNode, UseChildrenCalledExplicitly);
2004 }
2005
2006 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2007 {
2008     SpeculateCellOperand string(this, node->child1());
2009     SpeculateStrictInt32Operand index(this, node->child2());
2010     StorageOperand storage(this, node->child3());
2011
2012     GPRReg stringReg = string.gpr();
2013     GPRReg indexReg = index.gpr();
2014     GPRReg storageReg = storage.gpr();
2015     
2016     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2017
2018     // unsigned comparison so we can filter out negative indices and indices that are too large
2019     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2020
2021     GPRTemporary scratch(this);
2022     GPRReg scratchReg = scratch.gpr();
2023
2024     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2025
2026     // Load the character into scratchReg
2027     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2028
2029     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2030     JITCompiler::Jump cont8Bit = m_jit.jump();
2031
2032     is16Bit.link(&m_jit);
2033
2034     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2035
2036     cont8Bit.link(&m_jit);
2037
2038     int32Result(scratchReg, m_currentNode);
2039 }
2040
2041 void SpeculativeJIT::compileGetByValOnString(Node* node)
2042 {
2043     SpeculateCellOperand base(this, node->child1());
2044     SpeculateStrictInt32Operand property(this, node->child2());
2045     StorageOperand storage(this, node->child3());
2046     GPRReg baseReg = base.gpr();
2047     GPRReg propertyReg = property.gpr();
2048     GPRReg storageReg = storage.gpr();
2049
2050     GPRTemporary scratch(this);
2051     GPRReg scratchReg = scratch.gpr();
2052 #if USE(JSVALUE32_64)
2053     GPRTemporary resultTag;
2054     GPRReg resultTagReg = InvalidGPRReg;
2055     if (node->arrayMode().isOutOfBounds()) {
2056         GPRTemporary realResultTag(this);
2057         resultTag.adopt(realResultTag);
2058         resultTagReg = resultTag.gpr();
2059     }
2060 #endif
2061
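         // When the String prototype chain is sane, an out-of-bounds read does not need to
         // consult the prototype chain, so we plant watchpoints on the relevant prototype
         // structures to keep that assumption valid if we take the sane slow path below.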
2062     if (node->arrayMode().isOutOfBounds()) {
2063         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2064         if (globalObject->stringPrototypeChainIsSane()) {
2065             m_jit.addLazily(
2066                 speculationWatchpoint(),
2067                 globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2068             m_jit.addLazily(
2069                 speculationWatchpoint(),
2070                 globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2071         }
2072     }
2073
2074     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2075
2076     // unsigned comparison so we can filter out negative indices and indices that are too large
2077     JITCompiler::Jump outOfBounds = m_jit.branch32(
2078         MacroAssembler::AboveOrEqual, propertyReg,
2079         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2080     if (node->arrayMode().isInBounds())
2081         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2082
2083     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2084
2085     // Load the character into scratchReg
2086     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2087
2088     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2089     JITCompiler::Jump cont8Bit = m_jit.jump();
2090
2091     is16Bit.link(&m_jit);
2092
2093     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2094
2095     JITCompiler::Jump bigCharacter =
2096         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2097
2098     // 8 bit string values don't need the isASCII check.
2099     cont8Bit.link(&m_jit);
2100
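         // Map the character code to a JSString using the VM's single-character string cache;
         // codes of 0x100 or above were already diverted to the slow path.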
2101 #if CPU(X86)
2102     // We don't have enough registers, so construct our own indexed address and load.
2103     m_jit.lshift32(MacroAssembler::TrustedImm32(2), scratchReg);
2104     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2105     m_jit.loadPtr(scratchReg, scratchReg);
2106 #else
2107     GPRTemporary smallStrings(this);
2108     GPRReg smallStringsReg = smallStrings.gpr();
2109     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2110     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
2111 #endif
2112
2113     addSlowPathGenerator(
2114         slowPathCall(
2115             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2116
2117     if (node->arrayMode().isOutOfBounds()) {
2118 #if USE(JSVALUE32_64)
2119         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2120 #endif
2121
2122         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2123         if (globalObject->stringPrototypeChainIsSane()) {
2124 #if USE(JSVALUE64)
2125             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2126                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
2127 #else
2128             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2129                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2130                 baseReg, propertyReg)));
2131 #endif
2132         } else {
2133 #if USE(JSVALUE64)
2134             addSlowPathGenerator(
2135                 slowPathCall(
2136                     outOfBounds, this, operationGetByValStringInt,
2137                     scratchReg, baseReg, propertyReg));
2138 #else
2139             addSlowPathGenerator(
2140                 slowPathCall(
2141                     outOfBounds, this, operationGetByValStringInt,
2142                     resultTagReg, scratchReg, baseReg, propertyReg));
2143 #endif
2144         }
2145         
2146 #if USE(JSVALUE64)
2147         jsValueResult(scratchReg, m_currentNode);
2148 #else
2149         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2150 #endif
2151     } else
2152         cellResult(scratchReg, m_currentNode);
2153 }
2154
2155 void SpeculativeJIT::compileFromCharCode(Node* node)
2156 {
2157     SpeculateStrictInt32Operand property(this, node->child1());
2158     GPRReg propertyReg = property.gpr();
2159     GPRTemporary smallStrings(this);
2160     GPRTemporary scratch(this);
2161     GPRReg scratchReg = scratch.gpr();
2162     GPRReg smallStringsReg = smallStrings.gpr();
2163
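         // Fast path: look the character code up in the VM's single-character string cache.
         // Codes of 0xff or above, or a missing cache entry, fall back to
         // operationStringFromCharCode.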
2164     JITCompiler::JumpList slowCases;
2165     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2166     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2167     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2168
2169     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2170     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2171     cellResult(scratchReg, m_currentNode);
2172 }
2173
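     // Inspect the operand's current register format to decide which ValueToInt32 path to take:
     // an integer already in a register, a double, or a boxed JSValue.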
2174 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2175 {
2176 #if DFG_ENABLE(DEBUG_VERBOSE)
2177     dataLogF("checkGeneratedTypeForToInt32@%d   ", node->index());
2178 #endif
2179     VirtualRegister virtualRegister = node->virtualRegister();
2180     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2181
2182     switch (info.registerFormat()) {
2183     case DataFormatStorage:
2184         RELEASE_ASSERT_NOT_REACHED();
2185
2186     case DataFormatBoolean:
2187     case DataFormatCell:
2188         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2189         return GeneratedOperandTypeUnknown;
2190
2191     case DataFormatNone:
2192     case DataFormatJSCell:
2193     case DataFormatJS:
2194     case DataFormatJSBoolean:
2195         return GeneratedOperandJSValue;
2196
2197     case DataFormatJSInt32:
2198     case DataFormatInt32:
2199         return GeneratedOperandInteger;
2200
2201     case DataFormatJSDouble:
2202     case DataFormatDouble:
2203         return GeneratedOperandDouble;
2204         
2205     default:
2206         RELEASE_ASSERT_NOT_REACHED();
2207         return GeneratedOperandTypeUnknown;
2208     }
2209 }
2210
2211 void SpeculativeJIT::compileValueToInt32(Node* node)
2212 {
2213     switch (node->child1().useKind()) {
2214     case Int32Use: {
2215         SpeculateInt32Operand op1(this, node->child1());
2216         GPRTemporary result(this, Reuse, op1);
2217         m_jit.move(op1.gpr(), result.gpr());
2218         int32Result(result.gpr(), node, op1.format());
2219         return;
2220     }
2221         
2222 #if USE(JSVALUE64)
2223     case MachineIntUse: {
2224         SpeculateStrictInt52Operand op1(this, node->child1());
2225         GPRTemporary result(this, Reuse, op1);
2226         GPRReg op1GPR = op1.gpr();
2227         GPRReg resultGPR = result.gpr();
2228         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2229         int32Result(resultGPR, node, DataFormatInt32);
2230         return;
2231     }
2232 #endif // USE(JSVALUE64)
2233     
2234     case NumberUse:
2235     case NotCellUse: {
2236         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2237         case GeneratedOperandInteger: {
2238             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2239             GPRTemporary result(this, Reuse, op1);
2240             m_jit.move(op1.gpr(), result.gpr());
2241             int32Result(result.gpr(), node, op1.format());
2242             return;
2243         }
2244         case GeneratedOperandDouble: {
2245             GPRTemporary result(this);
2246             SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
2247             FPRReg fpr = op1.fpr();
2248             GPRReg gpr = result.gpr();
2249             JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2250             
2251             addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
2252
2253             int32Result(gpr, node);
2254             return;
2255         }
2256         case GeneratedOperandJSValue: {
2257             GPRTemporary result(this);
2258 #if USE(JSVALUE64)
2259             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2260
2261             GPRReg gpr = op1.gpr();
2262             GPRReg resultGpr = result.gpr();
2263             FPRTemporary tempFpr(this);
2264             FPRReg fpr = tempFpr.fpr();
2265
2266             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2267             JITCompiler::JumpList converted;
2268
2269             if (node->child1().useKind() == NumberUse) {
2270                 DFG_TYPE_CHECK(
2271                     JSValueRegs(gpr), node->child1(), SpecFullNumber,
2272                     m_jit.branchTest64(
2273                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2274             } else {
2275                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2276                 
2277                 DFG_TYPE_CHECK(
2278                     JSValueRegs(gpr), node->child1(), ~SpecCell,
2279                     m_jit.branchTest64(
2280                         JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
2281                 
2282                 // It's not a cell: so true turns into 1 and all else turns into 0.
2283                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2284                 converted.append(m_jit.jump());
2285                 
2286                 isNumber.link(&m_jit);
2287             }
2288
2289             // First, if we get here we have a double encoded as a JSValue
2290             m_jit.move(gpr, resultGpr);
2291             unboxDouble(resultGpr, fpr);
2292
2293             silentSpillAllRegisters(resultGpr);
2294             callOperation(toInt32, resultGpr, fpr);
2295             silentFillAllRegisters(resultGpr);
2296
2297             converted.append(m_jit.jump());
2298
2299             isInteger.link(&m_jit);
2300             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2301
2302             converted.link(&m_jit);
2303 #else
2304             Node* childNode = node->child1().node();
2305             VirtualRegister virtualRegister = childNode->virtualRegister();
2306             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2307
2308             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2309
2310             GPRReg payloadGPR = op1.payloadGPR();
2311             GPRReg resultGpr = result.gpr();
2312         
2313             JITCompiler::JumpList converted;
2314
2315             if (info.registerFormat() == DataFormatJSInt32)
2316                 m_jit.move(payloadGPR, resultGpr);
2317             else {
2318                 GPRReg tagGPR = op1.tagGPR();
2319                 FPRTemporary tempFpr(this);
2320                 FPRReg fpr = tempFpr.fpr();
2321                 FPRTemporary scratch(this);
2322
2323                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2324
2325                 if (node->child1().useKind() == NumberUse) {
2326                     DFG_TYPE_CHECK(
2327                         JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecFullNumber,
2328                         m_jit.branch32(
2329                             MacroAssembler::AboveOrEqual, tagGPR,
2330                             TrustedImm32(JSValue::LowestTag)));
2331                 } else {
2332                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2333                     
2334                     DFG_TYPE_CHECK(
2335                         JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
2336                         m_jit.branch32(
2337                             JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
2338                     
2339                     // It's not a cell: so true turns into 1 and all else turns into 0.
2340                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2341                     m_jit.move(TrustedImm32(0), resultGpr);
2342                     converted.append(m_jit.jump());
2343                     
2344                     isBoolean.link(&m_jit);
2345                     m_jit.move(payloadGPR, resultGpr);
2346                     converted.append(m_jit.jump());
2347                     
2348                     isNumber.link(&m_jit);
2349                 }
2350
2351                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2352
2353                 silentSpillAllRegisters(resultGpr);
2354                 callOperation(toInt32, resultGpr, fpr);
2355                 silentFillAllRegisters(resultGpr);
2356
2357                 converted.append(m_jit.jump());
2358
2359                 isInteger.link(&m_jit);
2360                 m_jit.move(payloadGPR, resultGpr);
2361
2362                 converted.link(&m_jit);
2363             }
2364 #endif
2365             int32Result(resultGpr, node);
2366             return;
2367         }
2368         case GeneratedOperandTypeUnknown:
2369             RELEASE_ASSERT(!m_compileOkay);
2370             return;
2371         }
2372         RELEASE_ASSERT_NOT_REACHED();
2373         return;
2374     }
2375     
2376     case BooleanUse: {
2377         SpeculateBooleanOperand op1(this, node->child1());
2378         GPRTemporary result(this, Reuse, op1);
2379         
2380         m_jit.move(op1.gpr(), result.gpr());
2381         m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2382         
2383         int32Result(result.gpr(), node);
2384         return;
2385     }
2386
2387     default:
2388         ASSERT(!m_compileOkay);
2389         return;
2390     }
2391 }
2392
2393 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2394 {
2395     if (!nodeCanSpeculateInt32(node->arithNodeFlags())) {
2396         // We know that this sometimes produces doubles. So produce a double every
2397         // time. This at least allows subsequent code to not have weird conditionals.
2398             
2399         SpeculateInt32Operand op1(this, node->child1());
2400         FPRTemporary result(this);
2401             
2402         GPRReg inputGPR = op1.gpr();
2403         FPRReg outputFPR = result.fpr();
2404             
2405         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2406             
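             // The input is a uint32 read through a signed int32 register. If it looks negative,
             // the true value is >= 2^31, so add 2^32 to correct the double.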
2407         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2408         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2409         positive.link(&m_jit);
2410             
2411         doubleResult(outputFPR, node);
2412         return;
2413     }
2414
2415     SpeculateInt32Operand op1(this, node->child1());
2416     GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2417
2418     m_jit.move(op1.gpr(), result.gpr());
2419
2420     // Test the operand is positive. This is a very special speculation check - we actually
2421     // use roll-forward speculation here, where if this fails, we jump to the baseline
2422     // instruction that follows us, rather than the one we're executing right now. We have
2423     // to do this because by this point, the original values necessary to compile whatever
2424     // operation the UInt32ToNumber originated from might be dead.
2425     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
2426
2427     int32Result(result.gpr(), node, op1.format());
2428 }
2429
2430 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2431 {
2432     SpeculateDoubleOperand op1(this, node->child1());
2433     FPRTemporary scratch(this);
2434     GPRTemporary result(this);
2435     
2436     FPRReg valueFPR = op1.fpr();
2437     FPRReg scratchFPR = scratch.fpr();
2438     GPRReg resultGPR = result.gpr();
2439
2440     JITCompiler::JumpList failureCases;
2441     bool negZeroCheck = !bytecodeCanIgnoreNegativeZero(node->arithNodeFlags());
2442     m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
2443     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
2444
2445     int32Result(resultGPR, node);
2446 }
2447
2448 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2449 {
2450     ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2451     
2452     if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2453         SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2454         FPRTemporary result(this);
2455         m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2456         doubleResult(result.fpr(), node);
2457         return;
2458     }
2459     
2460     JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2461     FPRTemporary result(this);
2462     
2463 #if USE(JSVALUE64)
2464     GPRTemporary temp(this);
2465
2466     GPRReg op1GPR = op1.gpr();
2467     GPRReg tempGPR = temp.gpr();
2468     FPRReg resultFPR = result.fpr();
2469     
2470     JITCompiler::Jump isInteger = m_jit.branch64(
2471         MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2472     
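         // If this node exits forward, the type check must be the forward variant, carrying a
         // ValueRecovery that tells OSR exit where to find the operand's JSValue.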
2473     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2474         if (node->flags() & NodeExitsForward) {
2475             forwardTypeCheck(
2476                 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2477                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
2478                 ValueRecovery::inGPR(op1GPR, DataFormatJS));
2479         } else {
2480             backwardTypeCheck(
2481                 JSValueRegs(op1GPR), node->child1(), SpecFullNumber,
2482                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2483         }
2484     }
2485     
2486     m_jit.move(op1GPR, tempGPR);
2487     unboxDouble(tempGPR, resultFPR);
2488     JITCompiler::Jump done = m_jit.jump();
2489     
2490     isInteger.link(&m_jit);
2491     m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2492     done.link(&m_jit);
2493 #else
2494     FPRTemporary temp(this);
2495     
2496     GPRReg op1TagGPR = op1.tagGPR();
2497     GPRReg op1PayloadGPR = op1.payloadGPR();
2498     FPRReg tempFPR = temp.fpr();
2499     FPRReg resultFPR = result.fpr();
2500     
2501     JITCompiler::Jump isInteger = m_jit.branch32(
2502         MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2503     
2504     if (needsTypeCheck(node->child1(), SpecFullNumber)) {
2505         if (node->flags() & NodeExitsForward) {
2506             forwardTypeCheck(
2507                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2508                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
2509                 ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
2510         } else {
2511             backwardTypeCheck(
2512                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecFullNumber,
2513                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2514         }
2515     }
2516     
2517     unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2518     JITCompiler::Jump done = m_jit.jump();
2519     
2520     isInteger.link(&m_jit);
2521     m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2522     done.link(&m_jit);
2523 #endif
2524     
2525     doubleResult(resultFPR, node);
2526 }
2527
2528 static double clampDoubleToByte(double d)
2529 {
2530     d += 0.5;
2531     if (!(d > 0))
2532         d = 0;
2533     else if (d > 255)
2534         d = 255;
2535     return d;
2536 }
2537
2538 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2539 {
2540     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2541     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2542     jit.xorPtr(result, result);
2543     MacroAssembler::Jump clamped = jit.jump();
2544     tooBig.link(&jit);
2545     jit.move(JITCompiler::TrustedImm32(255), result);
2546     clamped.link(&jit);
2547     inBounds.link(&jit);
2548 }
2549
2550 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2551 {
2552     // Unordered compare so we pick up NaN
2553     static const double zero = 0;
2554     static const double byteMax = 255;
2555     static const double half = 0.5;
2556     jit.loadDouble(&zero, scratch);
2557     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2558     jit.loadDouble(&byteMax, scratch);
2559     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2560     
2561     jit.loadDouble(&half, scratch);
2562     // FIXME: This should probably just use a floating point round!
2563     // https://bugs.webkit.org/show_bug.cgi?id=72054
2564     jit.addDouble(source, scratch);
2565     jit.truncateDoubleToInt32(scratch, result);   
2566     MacroAssembler::Jump truncatedInt = jit.jump();
2567     
2568     tooSmall.link(&jit);
2569     jit.xorPtr(result, result);
2570     MacroAssembler::Jump zeroed = jit.jump();
2571     
2572     tooBig.link(&jit);
2573     jit.move(JITCompiler::TrustedImm32(255), result);
2574     
2575     truncatedInt.link(&jit);
2576     zeroed.link(&jit);
2577
2578 }
2579
2580 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2581 {
2582     ASSERT(isInt(type));
2583     
2584     SpeculateCellOperand base(this, node->child1());
2585     SpeculateStrictInt32Operand property(this, node->child2());
2586     StorageOperand storage(this, node->child3());
2587
2588     GPRReg baseReg = base.gpr();
2589     GPRReg propertyReg = property.gpr();
2590     GPRReg storageReg = storage.gpr();
2591
2592     GPRTemporary result(this);
2593     GPRReg resultReg = result.gpr();
2594
2595     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2596
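         // Unsigned comparison against the view's length doubles as a negative-index check, as
         // in the string cases above.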
2597     speculationCheck(
2598         Uncountable, JSValueRegs(), 0,
2599         m_jit.branch32(
2600             MacroAssembler::AboveOrEqual, propertyReg,
2601             MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
2602     switch (elementSize(type)) {
2603     case 1:
2604         if (isSigned(type))
2605             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2606         else
2607             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2608         break;
2609     case 2:
2610         if (isSigned(type))
2611             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2612         else
2613             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2614         break;
2615     case 4:
2616         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2617         break;
2618     default:
2619         CRASH();
2620     }
2621     if (elementSize(type) < 4 || isSigned(type)) {
2622         int32Result(resultReg, node);
2623         return;
2624     }
2625     
2626     ASSERT(elementSize(type) == 4 && !isSigned(type));
2627     if (node->shouldSpeculateInt32()) {
2628         forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
2629         int32Result(resultReg, node);
2630         return;
2631     }
2632     
2633 #if USE(JSVALUE64)
2634     if (node->shouldSpeculateMachineInt()) {
2635         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2636         strictInt52Result(resultReg, node);
2637         return;
2638     }
2639 #endif
2640     
2641     FPRTemporary fresult(this);
2642     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2643     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2644     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2645     positive.link(&m_jit);
2646     doubleResult(fresult.fpr(), node);
2647 }
2648
2649 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2650 {
2651     ASSERT(isInt(type));
2652     
2653     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2654     GPRReg storageReg = storage.gpr();
2655     
2656     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2657     
2658     GPRTemporary value;
2659     GPRReg valueGPR = InvalidGPRReg;
2660     
2661     if (valueUse->isConstant()) {
2662         JSValue jsValue = valueOfJSConstant(valueUse.node());
2663         if (!jsValue.isNumber()) {
2664             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2665             noResult(node);
2666             return;
2667         }
2668         double d = jsValue.asNumber();
2669         if (isClamped(type)) {
2670             ASSERT(elementSize(type) == 1);
2671             d = clampDoubleToByte(d);
2672         }
2673         GPRTemporary scratch(this);
2674         GPRReg scratchReg = scratch.gpr();
2675         m_jit.move(Imm32(toInt32(d)), scratchReg);
2676         value.adopt(scratch);
2677         valueGPR = scratchReg;
2678     } else {
2679         switch (valueUse.useKind()) {
2680         case Int32Use: {
2681             SpeculateInt32Operand valueOp(this, valueUse);
2682             GPRTemporary scratch(this);
2683             GPRReg scratchReg = scratch.gpr();
2684             m_jit.move(valueOp.gpr(), scratchReg);
2685             if (isClamped(type)) {
2686                 ASSERT(elementSize(type) == 1);
2687                 compileClampIntegerToByte(m_jit, scratchReg);
2688             }
2689             value.adopt(scratch);
2690             valueGPR = scratchReg;
2691             break;
2692         }
2693             
2694 #if USE(JSVALUE64)
2695         case MachineIntUse: {
2696             SpeculateStrictInt52Operand valueOp(this, valueUse);
2697             GPRTemporary scratch(this);
2698             GPRReg scratchReg = scratch.gpr();
2699             m_jit.move(valueOp.gpr(), scratchReg);
2700             if (isClamped(type)) {
2701                 ASSERT(elementSize(type) == 1);
2702                 MacroAssembler::Jump inBounds = m_jit.branch64(
2703                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2704                 MacroAssembler::Jump tooBig = m_jit.branch64(
2705                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2706                 m_jit.move(TrustedImm32(0), scratchReg);
2707                 MacroAssembler::Jump clamped = m_jit.jump();
2708                 tooBig.link(&m_jit);
2709                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2710                 clamped.link(&m_jit);
2711                 inBounds.link(&m_jit);
2712             }
2713             value.adopt(scratch);
2714             valueGPR = scratchReg;
2715             break;
2716         }
2717 #endif // USE(JSVALUE64)
2718             
2719         case NumberUse: {
2720             if (isClamped(type)) {
2721                 ASSERT(elementSize(type) == 1);
2722                 SpeculateDoubleOperand valueOp(this, valueUse);
2723                 GPRTemporary result(this);
2724                 FPRTemporary floatScratch(this);
2725                 FPRReg fpr = valueOp.fpr();
2726                 GPRReg gpr = result.gpr();
2727                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2728                 value.adopt(result);
2729                 valueGPR = gpr;
2730             } else {
2731                 SpeculateDoubleOperand valueOp(this, valueUse);
2732                 GPRTemporary result(this);
2733                 FPRReg fpr = valueOp.fpr();
2734                 GPRReg gpr = result.gpr();
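                // NaN becomes 0 (matching ToInt32/ToUint32), otherwise try a fast
                // truncation; values outside the fast-truncation range fall back to the
                // toInt32 slow path.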
2735                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2736                 m_jit.xorPtr(gpr, gpr);
2737                 MacroAssembler::Jump fixed = m_jit.jump();
2738                 notNaN.link(&m_jit);
2739                 
2740                 MacroAssembler::Jump failed;
2741                 if (isSigned(type))
2742                     failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2743                 else
2744                     failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2745                 
2746                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2747                 
2748                 fixed.link(&m_jit);
2749                 value.adopt(result);
2750                 valueGPR = gpr;
2751             }
2752             break;
2753         }
2754             
2755         default:
2756             RELEASE_ASSERT_NOT_REACHED();
2757             break;
2758         }
2759     }
2760     
2761     ASSERT_UNUSED(valueGPR, valueGPR != property);
2762     ASSERT(valueGPR != base);
2763     ASSERT(valueGPR != storageReg);
2764     MacroAssembler::Jump outOfBounds;
2765     if (node->op() == PutByVal)
2766         outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2767
2768     switch (elementSize(type)) {
2769     case 1:
2770         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2771         break;
2772     case 2:
2773         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2774         break;
2775     case 4:
2776         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2777         break;
2778     default:
2779         CRASH();
2780     }
2781     if (node->op() == PutByVal)
2782         outOfBounds.link(&m_jit);
2783     noResult(node);
2784 }
2785
2786 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2787 {
2788     ASSERT(isFloat(type));
2789     
2790     SpeculateCellOperand base(this, node->child1());
2791     SpeculateStrictInt32Operand property(this, node->child2());
2792     StorageOperand storage(this, node->child3());
2793
2794     GPRReg baseReg = base.gpr();
2795     GPRReg propertyReg = property.gpr();
2796     GPRReg storageReg = storage.gpr();
2797
2798     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2799
2800     FPRTemporary result(this);
2801     FPRReg resultReg = result.fpr();
2802     speculationCheck(
2803         Uncountable, JSValueRegs(), 0,
2804         m_jit.branch32(
2805             MacroAssembler::AboveOrEqual, propertyReg,
2806             MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
2807     switch (elementSize(type)) {
2808     case 4:
2809         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2810         m_jit.convertFloatToDouble(resultReg, resultReg);
2811         break;
2812     case 8: {
2813         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2814         break;
2815     }
2816     default:
2817         RELEASE_ASSERT_NOT_REACHED();
2818     }
2819     
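    // Purify any NaN read from the buffer: replace it with the canonical quiet NaN so
    // that an arbitrary NaN bit pattern cannot be mistaken for a tagged JSValue.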
2820     MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2821     static const double NaN = QNaN;
2822     m_jit.loadDouble(&NaN, resultReg);
2823     notNaN.link(&m_jit);
2824     
2825     doubleResult(resultReg, node);
2826 }
2827
2828 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2829 {
2830     ASSERT(isFloat(type));
2831     
2832     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2833     GPRReg storageReg = storage.gpr();
2834     
2835     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2836     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2837
2838     SpeculateDoubleOperand valueOp(this, valueUse);
2839     FPRTemporary scratch(this);
2840     FPRReg valueFPR = valueOp.fpr();
2841     FPRReg scratchFPR = scratch.fpr();
2842
2843     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2844     
2845     MacroAssembler::Jump outOfBounds;
2846     if (node->op() == PutByVal) {
2847         outOfBounds = m_jit.branch32(
2848             MacroAssembler::AboveOrEqual, property,
2849             MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2850     }
2851     
2852     switch (elementSize(type)) {
2853     case 4: {
2854         m_jit.moveDouble(valueFPR, scratchFPR);
2855         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2856         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2857         break;
2858     }
2859     case 8:
2860         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2861         break;
2862     default:
2863         RELEASE_ASSERT_NOT_REACHED();
2864     }
2865     if (node->op() == PutByVal)
2866         outOfBounds.link(&m_jit);
2867     noResult(node);
2868 }
2869
2870 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2871 {
2872     // Check that prototype is an object.
2873     m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2874     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2875     
2876     // Initialize scratchReg with the value being checked.
2877     m_jit.move(valueReg, scratchReg);
2878     
2879     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2880     MacroAssembler::Label loop(&m_jit);
2881     m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
2882 #if USE(JSVALUE64)
2883     m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2884     MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2885     m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
2886 #else
2887     m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2888     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2889     m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2890 #endif
2891     
2892     // No match - result is false.
2893 #if USE(JSVALUE64)
2894     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2895 #else
2896     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2897 #endif
2898     MacroAssembler::Jump putResult = m_jit.jump();
2899     
2900     isInstance.link(&m_jit);
2901 #if USE(JSVALUE64)
2902     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2903 #else
2904     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2905 #endif
2906     
2907     putResult.link(&m_jit);
2908 }
2909
2910 void SpeculativeJIT::compileInstanceOf(Node* node)
2911 {
2912     if (node->child1().useKind() == UntypedUse) {
2913         // It might not be a cell. Speculate less aggressively.
2914         // Or: it might only be used once (i.e. by us), so we get zero benefit
2915         // from speculating any more aggressively than we absolutely need to.
2916         
2917         JSValueOperand value(this, node->child1());
2918         SpeculateCellOperand prototype(this, node->child2());
2919         GPRTemporary scratch(this);
2920         
2921         GPRReg prototypeReg = prototype.gpr();
2922         GPRReg scratchReg = scratch.gpr();
2923         
2924 #if USE(JSVALUE64)
2925         GPRReg valueReg = value.gpr();
2926         MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2927         m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2928 #else
2929         GPRReg valueTagReg = value.tagGPR();
2930         GPRReg valueReg = value.payloadGPR();
2931         MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2932         m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2933 #endif
2934
2935         MacroAssembler::Jump done = m_jit.jump();
2936         
2937         isCell.link(&m_jit);
2938         
2939         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2940         
2941         done.link(&m_jit);
2942
2943 #if USE(JSVALUE64)
2944         jsValueResult(scratchReg, node, DataFormatJSBoolean);
2945 #else
2946         booleanResult(scratchReg, node);
2947 #endif
2948         return;
2949     }
2950     
2951     SpeculateCellOperand value(this, node->child1());
2952     SpeculateCellOperand prototype(this, node->child2());
2953     
2954     GPRTemporary scratch(this);
2955     
2956     GPRReg valueReg = value.gpr();
2957     GPRReg prototypeReg = prototype.gpr();
2958     GPRReg scratchReg = scratch.gpr();
2959     
2960     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2961
2962 #if USE(JSVALUE64)
2963     jsValueResult(scratchReg, node, DataFormatJSBoolean);
2964 #else
2965     booleanResult(scratchReg, node);
2966 #endif
2967 }
2968
2969 void SpeculativeJIT::compileAdd(Node* node)
2970 {
2971     switch (node->binaryUseKind()) {
2972     case Int32Use: {
2973         if (isNumberConstant(node->child1().node())) {
2974             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2975             SpeculateInt32Operand op2(this, node->child2());
2976             GPRTemporary result(this);
2977
2978             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2979                 m_jit.move(op2.gpr(), result.gpr());
2980                 m_jit.add32(Imm32(imm1), result.gpr());
2981             } else
2982                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2983
2984             int32Result(result.gpr(), node);
2985             return;
2986         }
2987         
2988         if (isNumberConstant(node->child2().node())) {
2989             SpeculateInt32Operand op1(this, node->child1());
2990             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2991             GPRTemporary result(this);
2992                 
2993             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2994                 m_jit.move(op1.gpr(), result.gpr());
2995                 m_jit.add32(Imm32(imm2), result.gpr());
2996             } else
2997                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2998
2999             int32Result(result.gpr(), node);
3000             return;
3001         }
3002                 
3003         SpeculateInt32Operand op1(this, node->child1());
3004         SpeculateInt32Operand op2(this, node->child2());
3005         GPRTemporary result(this, Reuse, op1, op2);
3006
3007         GPRReg gpr1 = op1.gpr();
3008         GPRReg gpr2 = op2.gpr();
3009         GPRReg gprResult = result.gpr();
3010
3011         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3012             if (gpr1 == gprResult)
3013                 m_jit.add32(gpr2, gprResult);
3014             else {
3015                 m_jit.move(gpr2, gprResult);
3016                 m_jit.add32(gpr1, gprResult);
3017             }
3018         } else {
3019             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3020                 
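            // If the result register aliases one of the operands, the add has already
            // clobbered that operand by the time the overflow branch is taken, so the
            // SpeculationRecovery tells OSR exit how to undo the add and recover the
            // original value.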
3021             if (gpr1 == gprResult)
3022                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3023             else if (gpr2 == gprResult)
3024                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3025             else
3026                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3027         }
3028
3029         int32Result(gprResult, node);
3030         return;
3031     }
3032         
3033 #if USE(JSVALUE64)
3034     case MachineIntUse: {
3035         // Will we need an overflow check? If we can prove that neither input can be
3036         // Int52 then the overflow check will not be necessary.
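        // (Two operands that both fit in 32 bits sum to at most 33 significant bits,
        // comfortably within the 52-bit range.)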
3037         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3038             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3039             SpeculateWhicheverInt52Operand op1(this, node->child1());
3040             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3041             GPRTemporary result(this, Reuse, op1);
3042             m_jit.move(op1.gpr(), result.gpr());
3043             m_jit.add64(op2.gpr(), result.gpr());
3044             int52Result(result.gpr(), node, op1.format());
3045             return;
3046         }
3047         
3048         SpeculateInt52Operand op1(this, node->child1());
3049         SpeculateInt52Operand op2(this, node->child2());
3050         GPRTemporary result(this);
3051         m_jit.move(op1.gpr(), result.gpr());
3052         speculationCheck(
3053             Int52Overflow, JSValueRegs(), 0,
3054             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3055         int52Result(result.gpr(), node);
3056         return;
3057     }
3058 #endif // USE(JSVALUE64)
3059     
3060     case NumberUse: {
3061         SpeculateDoubleOperand op1(this, node->child1());
3062         SpeculateDoubleOperand op2(this, node->child2());
3063         FPRTemporary result(this, op1, op2);
3064
3065         FPRReg reg1 = op1.fpr();
3066         FPRReg reg2 = op2.fpr();
3067         m_jit.addDouble(reg1, reg2, result.fpr());
3068
3069         doubleResult(result.fpr(), node);
3070         return;
3071     }
3072         
3073     case UntypedUse: {
3074         RELEASE_ASSERT(node->op() == ValueAdd);
3075         compileValueAdd(node);
3076         return;
3077     }
3078         
3079     default:
3080         RELEASE_ASSERT_NOT_REACHED();
3081         break;
3082     }
3083 }
3084
3085 void SpeculativeJIT::compileMakeRope(Node* node)
3086 {
3087     ASSERT(node->child1().useKind() == KnownStringUse);
3088     ASSERT(node->child2().useKind() == KnownStringUse);
3089     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3090     
3091     SpeculateCellOperand op1(this, node->child1());
3092     SpeculateCellOperand op2(this, node->child2());
3093     SpeculateCellOperand op3(this, node->child3());
3094     GPRTemporary result(this);
3095     GPRTemporary allocator(this);
3096     GPRTemporary scratch(this);
3097     
3098     GPRReg opGPRs[3];
3099     unsigned numOpGPRs;
3100     opGPRs[0] = op1.gpr();
3101     opGPRs[1] = op2.gpr();
3102     if (node->child3()) {
3103         opGPRs[2] = op3.gpr();
3104         numOpGPRs = 3;
3105     } else {
3106         opGPRs[2] = InvalidGPRReg;
3107         numOpGPRs = 2;
3108     }
3109     GPRReg resultGPR = result.gpr();
3110     GPRReg allocatorGPR = allocator.gpr();
3111     GPRReg scratchGPR = scratch.gpr();
3112     
3113     JITCompiler::JumpList slowPath;
3114     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
3115     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3116     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3117         
3118     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3119     for (unsigned i = 0; i < numOpGPRs; ++i)
3120         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3121     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3122         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
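    // Derive the rope's flags and length from its fibers: AND-ing the flags keeps the
    // Is8Bit bit only if every fiber is 8-bit, and the fiber lengths are summed.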
3123     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3124     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3125     for (unsigned i = 1; i < numOpGPRs; ++i) {
3126         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3127         m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
3128     }
3129     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3130     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3131     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3132     
3133     switch (numOpGPRs) {
3134     case 2:
3135         addSlowPathGenerator(slowPathCall(
3136             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3137         break;
3138     case 3:
3139         addSlowPathGenerator(slowPathCall(
3140             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3141         break;
3142     default:
3143         RELEASE_ASSERT_NOT_REACHED();
3144         break;
3145     }
3146         
3147     cellResult(resultGPR, node);
3148 }
3149
3150 void SpeculativeJIT::compileArithSub(Node* node)
3151 {
3152     switch (node->binaryUseKind()) {
3153     case Int32Use: {
3154         if (isNumberConstant(node->child2().node())) {
3155             SpeculateInt32Operand op1(this, node->child1());
3156             int32_t imm2 = valueOfInt32Constant(node->child2().node());
3157             GPRTemporary result(this);
3158
3159             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3160                 m_jit.move(op1.gpr(), result.gpr());
3161                 m_jit.sub32(Imm32(imm2), result.gpr());
3162             } else {
3163 #if ENABLE(JIT_CONSTANT_BLINDING)
3164                 GPRTemporary scratch(this);
3165                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3166 #else
3167                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3168 #endif
3169             }
3170
3171             int32Result(result.gpr(), node);
3172             return;
3173         }
3174             
3175         if (isNumberConstant(node->child1().node())) {
3176             int32_t imm1 = valueOfInt32Constant(node->child1().node());
3177             SpeculateInt32Operand op2(this, node->child2());
3178             GPRTemporary result(this);
3179                 
3180             m_jit.move(Imm32(imm1), result.gpr());
3181             if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3182                 m_jit.sub32(op2.gpr(), result.gpr());
3183             else
3184                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3185                 
3186             int32Result(result.gpr(), node);
3187             return;
3188         }
3189             
3190         SpeculateInt32Operand op1(this, node->child1());
3191         SpeculateInt32Operand op2(this, node->child2());
3192         GPRTemporary result(this);
3193
3194         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3195             m_jit.move(op1.gpr(), result.gpr());
3196             m_jit.sub32(op2.gpr(), result.gpr());
3197         } else
3198             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3199
3200         int32Result(result.gpr(), node);
3201         return;
3202     }
3203         
3204 #if USE(JSVALUE64)
3205     case MachineIntUse: {
3206         // Will we need an overflow check? If we can prove that neither input can be
3207         // Int52 then the overflow check will not be necessary.
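        // (The difference of two values that both fit in 32 bits needs at most 33 bits,
        // so it cannot leave the 52-bit range.)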
3208         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
3209             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
3210             SpeculateWhicheverInt52Operand op1(this, node->child1());
3211             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3212             GPRTemporary result(this, Reuse, op1);
3213             m_jit.move(op1.gpr(), result.gpr());
3214             m_jit.sub64(op2.gpr(), result.gpr());
3215             int52Result(result.gpr(), node, op1.format());
3216             return;
3217         }
3218         
3219         SpeculateInt52Operand op1(this, node->child1());
3220         SpeculateInt52Operand op2(this, node->child2());
3221         GPRTemporary result(this);
3222         m_jit.move(op1.gpr(), result.gpr());
3223         speculationCheck(
3224             Int52Overflow, JSValueRegs(), 0,
3225             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3226         int52Result(result.gpr(), node);
3227         return;
3228     }
3229 #endif // USE(JSVALUE64)
3230
3231     case NumberUse: {
3232         SpeculateDoubleOperand op1(this, node->child1());
3233         SpeculateDoubleOperand op2(this, node->child2());
3234         FPRTemporary result(this, op1);
3235
3236         FPRReg reg1 = op1.fpr();
3237         FPRReg reg2 = op2.fpr();
3238         m_jit.subDouble(reg1, reg2, result.fpr());
3239
3240         doubleResult(result.fpr(), node);
3241         return;
3242     }
3243         
3244     default:
3245         RELEASE_ASSERT_NOT_REACHED();
3246         return;
3247     }
3248 }
3249
3250 void SpeculativeJIT::compileArithNegate(Node* node)
3251 {
3252     switch (node->child1().useKind()) {
3253     case Int32Use: {
3254         SpeculateInt32Operand op1(this, node->child1());
3255         GPRTemporary result(this);
3256
3257         m_jit.move(op1.gpr(), result.gpr());
3258
3259         // Note: there is no case in which the result is not used as a number while
3260         // someone still cares about negative zero.
3261         
3262         if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3263             m_jit.neg32(result.gpr());
3264         else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
3265             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3266         else {
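            // Testing the low 31 bits catches both bad inputs in one branch: 0 (whose
            // negation is negative zero) and -2^31 (whose negation overflows) are the
            // only values with no bits set under the 0x7fffffff mask.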
3267             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3268             m_jit.neg32(result.gpr());
3269         }
3270
3271         int32Result(result.gpr(), node);
3272         return;
3273     }
3274
3275 #if USE(JSVALUE64)
3276     case MachineIntUse: {
3277         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
3278             SpeculateWhicheverInt52Operand op1(this, node->child1());
3279             GPRTemporary result(this);
3280             GPRReg op1GPR = op1.gpr();
3281             GPRReg resultGPR = result.gpr();
3282             m_jit.move(op1GPR, resultGPR);
3283             m_jit.neg64(resultGPR);
3284             if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3285                 speculationCheck(
3286                     NegativeZero, JSValueRegs(), 0,
3287                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3288             }
3289             int52Result(resultGPR, node, op1.format());
3290             return;
3291         }
3292         
3293         SpeculateInt52Operand op1(this, node->child1());
3294         GPRTemporary result(this);
3295         GPRReg op1GPR = op1.gpr();
3296         GPRReg resultGPR = result.gpr();
3297         m_jit.move(op1GPR, resultGPR);
3298         speculationCheck(
3299             Int52Overflow, JSValueRegs(), 0,
3300             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3301         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3302             speculationCheck(
3303                 NegativeZero, JSValueRegs(), 0,
3304                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3305         }
3306         int52Result(resultGPR, node);
3307         return;
3308     }
3309 #endif // USE(JSVALUE64)
3310         
3311     case NumberUse: {
3312         SpeculateDoubleOperand op1(this, node->child1());
3313         FPRTemporary result(this);
3314         
3315         m_jit.negateDouble(op1.fpr(), result.fpr());
3316         
3317         doubleResult(result.fpr(), node);
3318         return;
3319     }
3320         
3321     default:
3322         RELEASE_ASSERT_NOT_REACHED();
3323         return;
3324     }
3325 }
3326 void SpeculativeJIT::compileArithIMul(Node* node)
3327 {
3328     SpeculateInt32Operand op1(this, node->child1());
3329     SpeculateInt32Operand op2(this, node->child2());
3330     GPRTemporary result(this);
3331
3332     GPRReg reg1 = op1.gpr();
3333     GPRReg reg2 = op2.gpr();
3334
3335     m_jit.move(reg1, result.gpr());
3336     m_jit.mul32(reg2, result.gpr());
3337     int32Result(result.gpr(), node);
3338     return;
3339 }
3340
3341 void SpeculativeJIT::compileArithMul(Node* node)
3342 {
3343     switch (node->binaryUseKind()) {
3344     case Int32Use: {
3345         SpeculateInt32Operand op1(this, node->child1());
3346         SpeculateInt32Operand op2(this, node->child2());
3347         GPRTemporary result(this);
3348
3349         GPRReg reg1 = op1.gpr();
3350         GPRReg reg2 = op2.gpr();
3351
3352         // We can perform truncated multiplications if we get to this point, because if the
3353         // fixup phase could not prove that it would be safe, it would have turned us into
3354         // a double multiplication.
3355         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3356             m_jit.move(reg1, result.gpr());
3357             m_jit.mul32(reg2, result.gpr());
3358         } else {
3359             speculationCheck(
3360                 Overflow, JSValueRegs(), 0,
3361                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3362         }
3363             
3364         // Check for negative zero, if the users of this node care about such things.
3365         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3366             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3367             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3368             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3369             resultNonZero.link(&m_jit);
3370         }
3371
3372         int32Result(result.gpr(), node);
3373         return;
3374     }
3375     
3376 #if USE(JSVALUE64)   
3377     case MachineIntUse: {
3378         // This is super clever. We want to do an int52 multiplication and check the
3379         // int52 overflow bit. There is no direct hardware support for this, but we do
3380         // have the ability to do an int64 multiplication and check the int64 overflow
3381         // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3382         // registers, with the high 12 bits being sign-extended. We can do:
3383         //
3384         //     (a * (b << 12))
3385         //
3386         // This will give us a left-shifted int52 (value is in high 52 bits, low 12
3387         // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3388         // multiplication overflows is identical to whether the 'a * b' 52-bit
3389         // multiplication overflows.
3390         //
3391         // In our nomenclature, this is:
3392         //
3393         //     strictInt52(a) * int52(b) => int52
3394         //
3395         // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
3396         // bits.
3397         //
3398         // We don't care which of op1 or op2 serves as the left-shifted operand, so
3399         // we just do whatever is more convenient for op1 and have op2 do the
3400         // opposite. This ensures that we do at most one shift.
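        // Concretely, one operand is consumed in strict (unshifted) form and the other
        // in shifted form, so the 64-bit multiply computes (a * b) << 12, which is the
        // shifted Int52 encoding of the product; it overflows int64 exactly when a * b
        // needs more than 52 bits.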
3401
3402         SpeculateWhicheverInt52Operand op1(this, node->child1());
3403         SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
3404         GPRTemporary result(this);
3405         
3406         GPRReg op1GPR = op1.gpr();
3407         GPRReg op2GPR = op2.gpr();
3408         GPRReg resultGPR = result.gpr();
3409         
3410         m_jit.move(op1GPR, resultGPR);
3411         speculationCheck(
3412             Int52Overflow, JSValueRegs(), 0,
3413             m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
3414         
3415         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3416             MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
3417                 MacroAssembler::NonZero, resultGPR);
3418             speculationCheck(
3419                 NegativeZero, JSValueRegs(), 0,
3420                 m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
3421             speculationCheck(
3422                 NegativeZero, JSValueRegs(), 0,
3423                 m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
3424             resultNonZero.link(&m_jit);
3425         }
3426         
3427         int52Result(resultGPR, node);
3428         return;
3429     }
3430 #endif // USE(JSVALUE64)
3431         
3432     case NumberUse: {
3433         SpeculateDoubleOperand op1(this, node->child1());
3434         SpeculateDoubleOperand op2(this, node->child2());
3435         FPRTemporary result(this, op1, op2);
3436         
3437         FPRReg reg1 = op1.fpr();
3438         FPRReg reg2 = op2.fpr();
3439         
3440         m_jit.mulDouble(reg1, reg2, result.fpr());
3441         
3442         doubleResult(result.fpr(), node);
3443         return;
3444     }
3445         
3446     default:
3447         RELEASE_ASSERT_NOT_REACHED();
3448         return;
3449     }
3450 }
3451
3452 void SpeculativeJIT::compileArithDiv(Node* node)
3453 {
3454     switch (node->binaryUseKind()) {
3455     case Int32Use: {
3456 #if CPU(X86) || CPU(X86_64)
3457         SpeculateInt32Operand op1(this, node->child1());
3458         SpeculateInt32Operand op2(this, node->child2());
3459         GPRTemporary eax(this, X86Registers::eax);
3460         GPRTemporary edx(this, X86Registers::edx);
3461         GPRReg op1GPR = op1.gpr();
3462         GPRReg op2GPR = op2.gpr();
3463     
3464         GPRReg op2TempGPR;
3465         GPRReg temp;
3466         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3467             op2TempGPR = allocate();
3468             temp = op2TempGPR;
3469         } else {
3470             op2TempGPR = InvalidGPRReg;
3471             if (op1GPR == X86Registers::eax)
3472                 temp = X86Registers::edx;
3473             else
3474                 temp = X86Registers::eax;
3475         }
3476     
3477         ASSERT(temp != op1GPR);
3478         ASSERT(temp != op2GPR);
3479     
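        // One unsigned check filters out both denominators that need special handling
        // before idiv: adding 1 maps 0 -> 1 and -1 -> 0, so "temp is Above 1" holds
        // exactly when op2 is neither 0 nor -1.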
3480         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3481     
3482         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3483     
3484         JITCompiler::JumpList done;
3485         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3486             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3487             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3488         } else {
3489             // This is the case where we convert the result to an int after we're done, and we
3490             // already know that the denominator is either -1 or 0. So, if the denominator is
3491             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3492             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3493             // are happy to fall through to a normal division, since we're just dividing
3494             // something by negative 1.
3495         
3496             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3497             m_jit.move(TrustedImm32(0), eax.gpr());
3498             done.append(m_jit.jump());
3499         
3500             notZero.link(&m_jit);
3501             JITCompiler::Jump notNeg2ToThe31 =
3502                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3503             m_jit.move(op1GPR, eax.gpr());
3504             done.append(m_jit.jump());
3505         
3506             notNeg2ToThe31.link(&m_jit);
3507         }
3508     
3509         safeDenominator.link(&m_jit);
3510     
3511         // If the user cares about negative zero, then speculate that we're not about
3512         // to produce negative zero.
3513         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3514             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3515             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3516             numeratorNonZero.link(&m_jit);
3517         }
3518     
3519         if (op2TempGPR != InvalidGPRReg) {
3520             m_jit.move(op2GPR, op2TempGPR);
3521             op2GPR = op2TempGPR;
3522         }
3523             
3524         m_jit.move(op1GPR, eax.gpr());
3525         m_jit.assembler().cdq();
3526         m_jit.assembler().idivl_r(op2GPR);
3527             
3528         if (op2TempGPR != InvalidGPRReg)
3529             unlock(op2TempGPR);
3530
3531         // Check that there was no remainder. If there had been, then we'd be obligated to
3532         // produce a double result instead.
3533         if (bytecodeUsesAsNumber(node->arithNodeFlags()))
3534             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3535         
3536         done.link(&m_jit);
3537         int32Result(eax.gpr(), node);
3538 #elif CPU(APPLE_ARMV7S)
3539         SpeculateInt32Operand op1(this, node->child1());
3540         SpeculateInt32Operand op2(this, node->child2());
3541         GPRReg op1GPR = op1.gpr();
3542         GPRReg op2GPR = op2.gpr();
3543         GPRTemporary quotient(this);
3544         GPRTemporary multiplyAnswer(this);
3545
3546         // If the user cares about negative zero, then speculate that we're not about
3547         // to produce negative zero.
3548         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3549             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3550             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3551             numeratorNonZero.link(&m_jit);
3552         }
3553
3554         m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3555
3556         // Check that there was no remainder. If there had been, then we'd be obligated to
3557         // produce a double result instead.
3558         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3559             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3560             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3561         }
3562
3563         int32Result(quotient.gpr(), node);
3564 #else
3565         RELEASE_ASSERT_NOT_REACHED();
3566 #endif
3567         break;
3568     }
3569         
3570     case NumberUse: {
3571         SpeculateDoubleOperand op1(this, node->child1());
3572         SpeculateDoubleOperand op2(this, node->child2());
3573         FPRTemporary result(this, op1);
3574         
3575         FPRReg reg1 = op1.fpr();
3576         FPRReg reg2 = op2.fpr();
3577         m_jit.divDouble(reg1, reg2, result.fpr());
3578         
3579         doubleResult(result.fpr(), node);
3580         break;
3581     }
3582         
3583     default:
3584         RELEASE_ASSERT_NOT_REACHED();
3585         break;
3586     }
3587 }
3588
3589 void SpeculativeJIT::compileArithMod(Node* node)
3590 {
3591     switch (node->binaryUseKind()) {
3592     case Int32Use: {
3593         // In the fast path, the dividend value could be the final result
3594         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3595         SpeculateStrictInt32Operand op1(this, node->child1());
3596         
3597         if (isInt32Constant(node->child2().node())) {
3598             int32_t divisor = valueOfInt32Constant(node->child2().node());
3599             if (divisor > 0 && hasOneBitSet(divisor)) {
3600                 ASSERT(divisor != 1);
3601                 unsigned logarithm = WTF::fastLog2(divisor);
3602                 GPRReg dividendGPR = op1.gpr();
3603                 GPRTemporary result(this);
3604                 GPRReg resultGPR = result.gpr();
3605
3606                 // This is what LLVM generates. It's pretty crazy. Here's my
3607                 // attempt at understanding it.
3608                 
3609                 // First, compute either divisor - 1, or 0, depending on whether
3610                 // the dividend is negative:
3611                 //
3612                 // If dividend < 0:  resultGPR = divisor - 1
3613                 // If dividend >= 0: resultGPR = 0
3614                 m_jit.move(dividendGPR, resultGPR);
3615                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3616                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3617                 
3618                 // Add in the dividend, so that:
3619                 //
3620                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3621                 // If dividend >= 0: resultGPR = dividend
3622                 m_jit.add32(dividendGPR, resultGPR);
3623                 
3624                 // Mask so as to only get the *high* bits. This rounds down
3625                 // (towards negative infinity) resultGPR to the nearest multiple
3626                 // of divisor, so that:
3627                 //
3628                 // If dividend < 0:  resultGPR = divisor * floor((dividend + divisor - 1) / divisor)
3629                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3630                 //
3631                 // Note that this can be simplified to:
3632                 //
3633                 // If dividend < 0:  resultGPR = divisor * ceil(dividend / divisor)
3634                 // If dividend >= 0: resultGPR = divisor * floor(dividend / divisor)
3635                 //
3636                 // Note that if the dividend is negative, resultGPR will also be negative.
3637                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3638                 // zero, because of how things are conditionalized.
3639                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3640                 
3641                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3642                 //
3643                 // resultGPR = dividendGPR - resultGPR
3644                 m_jit.neg32(resultGPR);
3645                 m_jit.add32(dividendGPR, resultGPR);
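                // Worked example of the sequence above, with divisor = 4 (logarithm = 2)
                // and dividend = -5: the arithmetic shift by 31 gives -1, the unsigned
                // shift by 30 gives 3 (divisor - 1), adding the dividend gives -2, masking
                // with -4 gives -4, and negating then adding the dividend gives
                // -5 - (-4) = -1, which is indeed -5 % 4.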
3646                 
3647                 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3648                     // Check that we're not about to create negative zero.
3649                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3650                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3651                     numeratorPositive.link(&m_jit);
3652                 }
3653
3654                 int32Result(resultGPR, node);
3655                 return;
3656             }
3657         }
3658         
3659 #if CPU(X86) || CPU(X86_64)
3660         if (isInt32Constant(node->child2().node())) {
3661             int32_t divisor = valueOfInt32Constant(node->child2().node());
3662             if (divisor && divisor != -1) {
3663                 GPRReg op1Gpr = op1.gpr();
3664
3665                 GPRTemporary eax(this, X86Registers::eax);
3666                 GPRTemporary edx(this, X86Registers::edx);
3667                 GPRTemporary scratch(this);
3668                 GPRReg scratchGPR = scratch.gpr();
3669
3670                 GPRReg op1SaveGPR;
3671                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3672                     op1SaveGPR = allocate();
3673                     ASSERT(op1Gpr != op1SaveGPR);
3674                     m_jit.move(op1Gpr, op1SaveGPR);
3675                 } else
3676                     op1SaveGPR = op1Gpr;
3677                 ASSERT(op1SaveGPR != X86Registers::eax);
3678                 ASSERT(op1SaveGPR != X86Registers::edx);
3679
3680                 m_jit.move(op1Gpr, eax.gpr());
3681                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3682                 m_jit.assembler().cdq();
3683                 m_jit.assembler().idivl_r(scratchGPR);
3684                 if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3685                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3686                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3687                     numeratorPositive.link(&m_jit);
3688                 }
3689             
3690                 if (op1SaveGPR != op1Gpr)
3691                     unlock(op1SaveGPR);
3692
3693                 int32Result(edx.gpr(), node);
3694                 return;
3695             }
3696         }
3697 #endif
3698
3699         SpeculateInt32Operand op2(this, node->child2());
3700 #if CPU(X86) || CPU(X86_64)
3701         GPRTemporary eax(this, X86Registers::eax);
3702         GPRTemporary edx(this, X86Registers::edx);
3703         GPRReg op1GPR = op1.gpr();
3704         GPRReg op2GPR = op2.gpr();
3705     
3706         GPRReg op2TempGPR;
3707         GPRReg temp;
3708         GPRReg op1SaveGPR;
3709     
3710         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3711             op2TempGPR = allocate();
3712             temp = op2TempGPR;
3713         } else {
3714             op2TempGPR = InvalidGPRReg;
3715             if (op1GPR == X86Registers::eax)
3716                 temp = X86Registers::edx;
3717             else
3718                 temp = X86Registers::eax;
3719         }
3720     
3721         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3722             op1SaveGPR = allocate();
3723             ASSERT(op1GPR != op1SaveGPR);
3724             m_jit.move(op1GPR, op1SaveGPR);
3725         } else
3726             op1SaveGPR = op1GPR;
3727     
3728         ASSERT(temp != op1GPR);
3729         ASSERT(temp != op2GPR);
3730         ASSERT(op1SaveGPR != X86Registers::eax);
3731         ASSERT(op1SaveGPR != X86Registers::edx);
3732     
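        // Same safe-denominator trick as in compileArithDiv: unsigned (op2 + 1) > 1
        // holds exactly when op2 is neither 0 nor -1.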
3733         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3734     
3735         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3736     
3737         JITCompiler::JumpList done;
3738         
3739         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3740         // separate case for that. But it probably doesn't matter so much.
3741         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3742             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3743             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3744         } else {
3745             // This is the case where we convert the result to an int after we're done, and we
3746             // already know that the denominator is either -1 or 0. So, if the denominator is
3747             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3748             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3749             // happy to fall through to a normal division, since we're just dividing something
3750             // by negative 1.
3751         
3752             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3753             m_jit.move(TrustedImm32(0), edx.gpr());
3754             done.append(m_jit.jump());
3755         
3756             notZero.link(&m_jit);
3757             JITCompiler::Jump notNeg2ToThe31 =
3758                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3759             m_jit.move(TrustedImm32(0), edx.gpr());
3760             done.append(m_jit.jump());
3761         
3762             notNeg2ToThe31.link(&m_jit);
3763         }
3764         
3765         safeDenominator.link(&m_jit);
3766             
3767         if (op2TempGPR != InvalidGPRReg) {
3768             m_jit.move(op2GPR, op2TempGPR);
3769             op2GPR = op2TempGPR;
3770         }
3771             
3772         m_jit.move(op1GPR, eax.gpr());
3773         m_jit.assembler().cdq();
3774         m_jit.assembler().idivl_r(op2GPR);
3775             
3776         if (op2TempGPR != InvalidGPRReg)
3777             unlock(op2TempGPR);
3778
3779         // Check that we're not about to create negative zero.
3780         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3781             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3782             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3783             numeratorPositive.link(&m_jit);
3784         }
3785     
3786         if (op1SaveGPR != op1GPR)
3787             unlock(op1SaveGPR);
3788             
3789         done.link(&m_jit);
3790         int32Result(edx.gpr(), node);
3791
3792 #elif CPU(APPLE_ARMV7S)
3793         GPRTemporary temp(this);