Unreviewed, rolling out r156019 and r156020.
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "JSCJSValueInlines.h"
39 #include "LinkBuffer.h"
40
41 namespace JSC { namespace DFG {
42
43 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
44     : m_compileOkay(true)
45     , m_jit(jit)
46     , m_currentNode(0)
47     , m_indexInBlock(0)
48     , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
49     , m_arguments(jit.codeBlock()->numParameters())
50     , m_variables(jit.graph().m_localVars)
51     , m_lastSetOperand(std::numeric_limits<int>::max())
52     , m_state(m_jit.graph())
53     , m_interpreter(m_jit.graph(), m_state)
54     , m_stream(&jit.jitCode()->variableEventStream)
55     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
56     , m_isCheckingArgumentTypes(false)
57 {
58 }
59
60 SpeculativeJIT::~SpeculativeJIT()
61 {
62 }
63
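// Note on the fast path below (a sketch of what the emitted code does, not a spec):
//   1. Bump-allocate vectorLength * sizeof(JSValue) + sizeof(IndexingHeader) bytes of
//      butterfly storage.
//   2. Position storageGPR at element 0 (the subPtr below), so the public and vector
//      lengths land in the IndexingHeader via the Butterfly offsets.
//   3. Allocate the JSArray cell itself via emitAllocateJSObject.
//   4. For double arrays, fill slots past numElements with QNaN so they read as holes.
// Any failed check jumps to a slow path that calls operationNewArrayWithSize and reloads
// both the result and the storage pointer.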
64 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
65 {
66     ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
67     
68     GPRTemporary scratch(this);
69     GPRTemporary scratch2(this);
70     GPRReg scratchGPR = scratch.gpr();
71     GPRReg scratch2GPR = scratch2.gpr();
72     
73     unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
74     
75     JITCompiler::JumpList slowCases;
76     
77     slowCases.append(
78         emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
79     m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
80     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
81     
82     m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
83     m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
84     
85     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
86 #if USE(JSVALUE64)
87         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
88         for (unsigned i = numElements; i < vectorLength; ++i)
89             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
90 #else
91         EncodedValueDescriptor value;
92         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
93         for (unsigned i = numElements; i < vectorLength; ++i) {
94             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
95             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
96         }
97 #endif
98     }
99     
100     // I want a slow path that also loads out the storage pointer, and that's
101     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
102     // of work for a very small piece of functionality. :-/
103     addSlowPathGenerator(adoptPtr(
104         new CallArrayAllocatorSlowPathGenerator(
105             slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
106             structure, numElements)));
107 }
108
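// Speculation checks come in two flavors. The "backward" variants record an OSR exit that
// reconstructs bytecode state as of the current node, snapshotting the variable event
// stream at m_stream->size(). The plain speculationCheck() variants additionally convert
// the freshly appended exit into a "forward" exit (see convertLastOSRExitToForward) when
// the current speculation direction is ForwardSpeculation, in which case an explicit
// ValueRecovery may be supplied for the value that is still in flight.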
109 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
110 {
111     if (!m_compileOkay)
112         return;
113     ASSERT(m_isCheckingArgumentTypes || m_canExit);
114     m_jit.appendExitInfo(jumpToFail);
115     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
116 }
117
118 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
119 {
120     if (!m_compileOkay)
121         return;
122     ASSERT(m_isCheckingArgumentTypes || m_canExit);
123     m_jit.appendExitInfo(jumpsToFail);
124     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
125 }
126
127 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
128 {
129     if (!m_compileOkay)
130         return;
131     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
132     if (m_speculationDirection == ForwardSpeculation)
133         convertLastOSRExitToForward();
134 }
135
136 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
137 {
138     ASSERT(m_isCheckingArgumentTypes || m_canExit);
139     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
140 }
141
142 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
143 {
144     if (!m_compileOkay)
145         return OSRExitJumpPlaceholder();
146     ASSERT(m_isCheckingArgumentTypes || m_canExit);
147     unsigned index = m_jit.jitCode()->osrExit.size();
148     m_jit.appendExitInfo();
149     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
150     return OSRExitJumpPlaceholder(index);
151 }
152
153 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
154 {
155     ASSERT(m_isCheckingArgumentTypes || m_canExit);
156     return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
157 }
158
159 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
160 {
161     if (!m_compileOkay)
162         return;
163     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
164     if (m_speculationDirection == ForwardSpeculation)
165         convertLastOSRExitToForward();
166 }
167
168 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
169 {
170     ASSERT(m_isCheckingArgumentTypes || m_canExit);
171     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
172 }
173
174 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
175 {
176     if (!m_compileOkay)
177         return;
178     ASSERT(m_isCheckingArgumentTypes || m_canExit);
179     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
180     m_jit.appendExitInfo(jumpToFail);
181     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
182 }
183
184 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
185 {
186     ASSERT(m_isCheckingArgumentTypes || m_canExit);
187     backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
188 }
189
190 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
191 {
192     if (!m_compileOkay)
193         return;
194     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
195     if (m_speculationDirection == ForwardSpeculation)
196         convertLastOSRExitToForward();
197 }
198
199 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
200 {
201     speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
202 }
203
204 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
205 {
206     if (!m_compileOkay)
207         return 0;
208     ASSERT(m_isCheckingArgumentTypes || m_canExit);
209     m_jit.appendExitInfo(JITCompiler::JumpList());
210     OSRExit& exit = m_jit.jitCode()->osrExit[
211         m_jit.jitCode()->appendOSRExit(OSRExit(
212             kind, jsValueSource,
213             m_jit.graph().methodOfGettingAValueProfileFor(node),
214             this, m_stream->size()))];
215     exit.m_watchpointIndex = m_jit.jitCode()->appendWatchpoint(
216         JumpReplacementWatchpoint(m_jit.watchpointLabel()));
217     if (m_speculationDirection == ForwardSpeculation)
218         convertLastOSRExitToForward();
219     return &m_jit.jitCode()->watchpoints[exit.m_watchpointIndex];
220 }
221
222 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
223 {
224     return speculationWatchpoint(kind, JSValueSource(), 0);
225 }
226
227 void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
228 {
229     m_jit.jitCode()->lastOSRExit().convertToForward(
230         m_block, m_currentNode, m_indexInBlock, valueRecovery);
231 }
232
233 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
234 {
235     ASSERT(m_isCheckingArgumentTypes || m_canExit);
236     backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
237     convertLastOSRExitToForward(valueRecovery);
238 }
239
240 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
241 {
242     ASSERT(m_isCheckingArgumentTypes || m_canExit);
243     backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
244     convertLastOSRExitToForward(valueRecovery);
245 }
246
247 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
248 {
249     ASSERT(m_isCheckingArgumentTypes || m_canExit);
250 #if DFG_ENABLE(DEBUG_VERBOSE)
251     dataLogF("SpeculativeJIT was terminated.\n");
252 #endif
253     if (!m_compileOkay)
254         return;
255     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
256     m_compileOkay = false;
257 }
258
259 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
260 {
261     ASSERT(m_isCheckingArgumentTypes || m_canExit);
262     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
263 }
264
265 void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
266 {
267     ASSERT(needsTypeCheck(edge, typesPassedThrough));
268     m_interpreter.filter(edge, typesPassedThrough);
269     backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
270 }
271
272 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
273 {
274     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
275     if (m_speculationDirection == ForwardSpeculation)
276         convertLastOSRExitToForward();
277 }
278
279 void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
280 {
281     backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
282     convertLastOSRExitToForward(valueRecovery);
283 }
284
285 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
286 {
287     m_slowPathGenerators.append(slowPathGenerator);
288 }
289
290 void SpeculativeJIT::runSlowPathGenerators()
291 {
292 #if DFG_ENABLE(DEBUG_VERBOSE)
293     dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
294 #endif
295     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
296         m_slowPathGenerators[i]->generate(this);
297 }
298
299 // On Windows we need to wrap fmod; on other platforms we can call it directly.
300 // On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
301 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
302 static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
303 {
304     return fmod(x, y);
305 }
306 #else
307 #define fmodAsDFGOperation fmod
308 #endif
309
310 void SpeculativeJIT::clearGenerationInfo()
311 {
312     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
313         m_generationInfo[i] = GenerationInfo();
314     m_gprs = RegisterBank<GPRInfo>();
315     m_fprs = RegisterBank<FPRInfo>();
316 }
317
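// Silent spill/fill: when we need to make a call without permanently disturbing register
// allocation, we build a SilentRegisterSavePlan for each live register. The plan pairs a
// spill action (how to save the value before the call, if it needs saving at all) with a
// fill action (how to rematerialize it afterwards: from a constant, from its spill slot,
// possibly boxing or unboxing along the way). silentSpill() and silentFill() below just
// interpret these plans.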
318 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
319 {
320     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
321     Node* node = info.node();
322     DataFormat registerFormat = info.registerFormat();
323     ASSERT(registerFormat != DataFormatNone);
324     ASSERT(registerFormat != DataFormatDouble);
325         
326     SilentSpillAction spillAction;
327     SilentFillAction fillAction;
328         
329     if (!info.needsSpill())
330         spillAction = DoNothingForSpill;
331     else {
332 #if USE(JSVALUE64)
333         ASSERT(info.gpr() == source);
334         if (registerFormat == DataFormatInt32)
335             spillAction = Store32Payload;
336         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
337             spillAction = StorePtr;
338         else {
339             ASSERT(registerFormat & DataFormatJS);
340             spillAction = Store64;
341         }
342 #elif USE(JSVALUE32_64)
343         if (registerFormat & DataFormatJS) {
344             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
345             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
346         } else {
347             ASSERT(info.gpr() == source);
348             spillAction = Store32Payload;
349         }
350 #endif
351     }
352         
353     if (registerFormat == DataFormatInt32) {
354         ASSERT(info.gpr() == source);
355         ASSERT(isJSInt32(info.registerFormat()));
356         if (node->hasConstant()) {
357             ASSERT(isInt32Constant(node));
358             fillAction = SetInt32Constant;
359         } else
360             fillAction = Load32Payload;
361     } else if (registerFormat == DataFormatBoolean) {
362 #if USE(JSVALUE64)
363         RELEASE_ASSERT_NOT_REACHED();
364         fillAction = DoNothingForFill;
365 #elif USE(JSVALUE32_64)
366         ASSERT(info.gpr() == source);
367         if (node->hasConstant()) {
368             ASSERT(isBooleanConstant(node));
369             fillAction = SetBooleanConstant;
370         } else
371             fillAction = Load32Payload;
372 #endif
373     } else if (registerFormat == DataFormatCell) {
374         ASSERT(info.gpr() == source);
375         if (node->hasConstant()) {
376             JSValue value = valueOfJSConstant(node);
377             ASSERT_UNUSED(value, value.isCell());
378             fillAction = SetCellConstant;
379         } else {
380 #if USE(JSVALUE64)
381             fillAction = LoadPtr;
382 #else
383             fillAction = Load32Payload;
384 #endif
385         }
386     } else if (registerFormat == DataFormatStorage) {
387         ASSERT(info.gpr() == source);
388         fillAction = LoadPtr;
389     } else {
390         ASSERT(registerFormat & DataFormatJS);
391 #if USE(JSVALUE64)
392         ASSERT(info.gpr() == source);
393         if (node->hasConstant()) {
394             if (valueOfJSConstant(node).isCell())
395                 fillAction = SetTrustedJSConstant;
396             else
397                 fillAction = SetJSConstant;
398         } else if (info.spillFormat() == DataFormatInt32) {
399             ASSERT(registerFormat == DataFormatJSInt32);
400             fillAction = Load32PayloadBoxInt;
401         } else if (info.spillFormat() == DataFormatDouble) {
402             ASSERT(registerFormat == DataFormatJSDouble);
403             fillAction = LoadDoubleBoxDouble;
404         } else
405             fillAction = Load64;
406 #else
407         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
408         if (node->hasConstant())
409             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
410         else if (info.payloadGPR() == source)
411             fillAction = Load32Payload;
412         else { // Fill the Tag
413             switch (info.spillFormat()) {
414             case DataFormatInt32:
415                 ASSERT(registerFormat == DataFormatJSInt32);
416                 fillAction = SetInt32Tag;
417                 break;
418             case DataFormatCell:
419                 ASSERT(registerFormat == DataFormatJSCell);
420                 fillAction = SetCellTag;
421                 break;
422             case DataFormatBoolean:
423                 ASSERT(registerFormat == DataFormatJSBoolean);
424                 fillAction = SetBooleanTag;
425                 break;
426             default:
427                 fillAction = Load32Tag;
428                 break;
429             }
430         }
431 #endif
432     }
433         
434     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
435 }
436     
437 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
438 {
439     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
440     Node* node = info.node();
441     ASSERT(info.registerFormat() == DataFormatDouble);
442
443     SilentSpillAction spillAction;
444     SilentFillAction fillAction;
445         
446     if (!info.needsSpill())
447         spillAction = DoNothingForSpill;
448     else {
449         ASSERT(!node->hasConstant());
450         ASSERT(info.spillFormat() == DataFormatNone);
451         ASSERT(info.fpr() == source);
452         spillAction = StoreDouble;
453     }
454         
455 #if USE(JSVALUE64)
456     if (node->hasConstant()) {
457         ASSERT(isNumberConstant(node));
458         fillAction = SetDoubleConstant;
459     } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
460         // It was already spilled previously, and not as a double, which means we need unboxing.
461         ASSERT(info.spillFormat() & DataFormatJS);
462         fillAction = LoadJSUnboxDouble;
463     } else
464         fillAction = LoadDouble;
465 #elif USE(JSVALUE32_64)
466     ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
467     if (node->hasConstant()) {
468         ASSERT(isNumberConstant(node));
469         fillAction = SetDoubleConstant;
470     } else
471         fillAction = LoadDouble;
472 #endif
473
474     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
475 }
476     
477 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
478 {
479     switch (plan.spillAction()) {
480     case DoNothingForSpill:
481         break;
482     case Store32Tag:
483         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
484         break;
485     case Store32Payload:
486         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
487         break;
488     case StorePtr:
489         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
490         break;
491 #if USE(JSVALUE64)
492     case Store64:
493         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
494         break;
495 #endif
496     case StoreDouble:
497         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
498         break;
499     default:
500         RELEASE_ASSERT_NOT_REACHED();
501     }
502 }
503     
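// canTrample is a scratch GPR the fill is allowed to clobber; on JSVALUE64 it is used to
// materialize double constants and to hold a boxed JSValue while unboxing it into an FPR.
// On JSVALUE32_64 it is unused.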
504 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
505 {
506 #if USE(JSVALUE32_64)
507     UNUSED_PARAM(canTrample);
508 #endif
509     switch (plan.fillAction()) {
510     case DoNothingForFill:
511         break;
512     case SetInt32Constant:
513         m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
514         break;
515     case SetBooleanConstant:
516         m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
517         break;
518     case SetCellConstant:
519         m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
520         break;
521 #if USE(JSVALUE64)
522     case SetTrustedJSConstant:
523         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
524         break;
525     case SetJSConstant:
526         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
527         break;
528     case SetDoubleConstant:
529         m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
530         m_jit.move64ToDouble(canTrample, plan.fpr());
531         break;
532     case Load32PayloadBoxInt:
533         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
534         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
535         break;
536     case LoadDoubleBoxDouble:
537         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
538         m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
539         break;
540     case LoadJSUnboxDouble:
541         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
542         unboxDouble(canTrample, plan.fpr());
543         break;
544 #else
545     case SetJSConstantTag:
546         m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
547         break;
548     case SetJSConstantPayload:
549         m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
550         break;
551     case SetInt32Tag:
552         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
553         break;
554     case SetCellTag:
555         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
556         break;
557     case SetBooleanTag:
558         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
559         break;
560     case SetDoubleConstant:
561         m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
562         break;
563 #endif
564     case Load32Tag:
565         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
566         break;
567     case Load32Payload:
568         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
569         break;
570     case LoadPtr:
571         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
572         break;
573 #if USE(JSVALUE64)
574     case Load64:
575         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
576         break;
577 #endif
578     case LoadDouble:
579         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
580         break;
581     default:
582         RELEASE_ASSERT_NOT_REACHED();
583     }
584 }
585     
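// Emits a jump that is taken when the indexing type in tempGPR does not match the
// requested shape. For Array::Array the IsArray bit must be set as well, so the compare
// is against (IsArray | shape); otherwise only the IndexingShapeMask bits are compared.
// tempGPR is clobbered in the process.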
586 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
587 {
588     switch (arrayMode.arrayClass()) {
589     case Array::OriginalArray: {
590         CRASH();
591         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
592         return result;
593     }
594         
595     case Array::Array:
596         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
597         return m_jit.branch32(
598             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
599         
600     default:
601         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
602         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
603     }
604 }
605
606 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
607 {
608     JITCompiler::JumpList result;
609     
610     switch (arrayMode.type()) {
611     case Array::Int32:
612         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
613
614     case Array::Double:
615         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
616
617     case Array::Contiguous:
618         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
619
620     case Array::ArrayStorage:
621     case Array::SlowPutArrayStorage: {
622         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
623         
624         if (arrayMode.isJSArray()) {
625             if (arrayMode.isSlowPut()) {
626                 result.append(
627                     m_jit.branchTest32(
628                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
629                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
630                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
631                 result.append(
632                     m_jit.branch32(
633                         MacroAssembler::Above, tempGPR,
634                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
635                 break;
636             }
637             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
638             result.append(
639                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
640             break;
641         }
642         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
643         if (arrayMode.isSlowPut()) {
644             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
645             result.append(
646                 m_jit.branch32(
647                     MacroAssembler::Above, tempGPR,
648                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
649             break;
650         }
651         result.append(
652             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
653         break;
654     }
655     default:
656         CRASH();
657         break;
658     }
659     
660     return result;
661 }
662
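// CheckArray: verify that the base object already has the array structure that the
// ArrayMode speculated, without converting anything (conversion is Arrayify's job). For
// the indexed shapes we load the structure's indexing type and reuse
// jumpSlowForUnwantedArrayMode; for String, Arguments and typed arrays we compare the
// structure's ClassInfo pointer instead. If abstract interpretation has already proved
// the check (alreadyChecked), no code is emitted.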
663 void SpeculativeJIT::checkArray(Node* node)
664 {
665     ASSERT(node->arrayMode().isSpecific());
666     ASSERT(!node->arrayMode().doesConversion());
667     
668     SpeculateCellOperand base(this, node->child1());
669     GPRReg baseReg = base.gpr();
670     
671     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
672         noResult(m_currentNode);
673         return;
674     }
675     
676     const ClassInfo* expectedClassInfo = 0;
677     
678     switch (node->arrayMode().type()) {
679     case Array::String:
680         expectedClassInfo = JSString::info();
681         break;
682     case Array::Int32:
683     case Array::Double:
684     case Array::Contiguous:
685     case Array::ArrayStorage:
686     case Array::SlowPutArrayStorage: {
687         GPRTemporary temp(this);
688         GPRReg tempGPR = temp.gpr();
689         m_jit.loadPtr(
690             MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
691         m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
692         speculationCheck(
693             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
694             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
695         
696         noResult(m_currentNode);
697         return;
698     }
699     case Array::Arguments:
700         expectedClassInfo = Arguments::info();
701         break;
702     default:
703         expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
704         break;
705     }
706     
707     RELEASE_ASSERT(expectedClassInfo);
708     
709     GPRTemporary temp(this);
710     m_jit.loadPtr(
711         MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
712     speculationCheck(
713         BadType, JSValueSource::unboxedCell(baseReg), node,
714         m_jit.branchPtr(
715             MacroAssembler::NotEqual,
716             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
717             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
718     
719     noResult(m_currentNode);
720 }
721
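// Arrayify: make sure the base has the storage the ArrayMode wants, converting it on a
// slow path if it does not. ArrayifyToStructure can compare the whole structure pointer
// (as a weak reference); otherwise we inspect the indexing type, as in checkArray. The
// actual conversion is delegated to ArrayifySlowPathGenerator.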
722 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
723 {
724     ASSERT(node->arrayMode().doesConversion());
725     
726     GPRTemporary temp(this);
727     GPRTemporary structure;
728     GPRReg tempGPR = temp.gpr();
729     GPRReg structureGPR = InvalidGPRReg;
730     
731     if (node->op() != ArrayifyToStructure) {
732         GPRTemporary realStructure(this);
733         structure.adopt(realStructure);
734         structureGPR = structure.gpr();
735     }
736         
737     // We can skip all that comes next if we already have array storage.
738     MacroAssembler::JumpList slowPath;
739     
740     if (node->op() == ArrayifyToStructure) {
741         slowPath.append(m_jit.branchWeakPtr(
742             JITCompiler::NotEqual,
743             JITCompiler::Address(baseReg, JSCell::structureOffset()),
744             node->structure()));
745     } else {
746         m_jit.loadPtr(
747             MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
748         
749         m_jit.load8(
750             MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
751         
752         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
753     }
754     
755     addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
756         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
757     
758     noResult(m_currentNode);
759 }
760
761 void SpeculativeJIT::arrayify(Node* node)
762 {
763     ASSERT(node->arrayMode().isSpecific());
764     
765     SpeculateCellOperand base(this, node->child1());
766     
767     if (!node->child2()) {
768         arrayify(node, base.gpr(), InvalidGPRReg);
769         return;
770     }
771     
772     SpeculateInt32Operand property(this, node->child2());
773     
774     arrayify(node, base.gpr(), property.gpr());
775 }
776
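// fillStorage: bring the edge's butterfly/storage pointer into a GPR. If it was spilled
// in DataFormatStorage we reload it directly from its stack slot; anything else is
// treated as a cell and goes through fillSpeculateCell.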
777 GPRReg SpeculativeJIT::fillStorage(Edge edge)
778 {
779     VirtualRegister virtualRegister = edge->virtualRegister();
780     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
781     
782     switch (info.registerFormat()) {
783     case DataFormatNone: {
784         if (info.spillFormat() == DataFormatStorage) {
785             GPRReg gpr = allocate();
786             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
787             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
788             info.fillStorage(*m_stream, gpr);
789             return gpr;
790         }
791         
792         // Must be a cell; fill it as a cell and then return the pointer.
793         return fillSpeculateCell(edge);
794     }
795         
796     case DataFormatStorage: {
797         GPRReg gpr = info.gpr();
798         m_gprs.lock(gpr);
799         return gpr;
800     }
801         
802     default:
803         return fillSpeculateCell(edge);
804     }
805 }
806
807 void SpeculativeJIT::useChildren(Node* node)
808 {
809     if (node->flags() & NodeHasVarArgs) {
810         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
811             if (!!m_jit.graph().m_varArgChildren[childIdx])
812                 use(m_jit.graph().m_varArgChildren[childIdx]);
813         }
814     } else {
815         Edge child1 = node->child1();
816         if (!child1) {
817             ASSERT(!node->child2() && !node->child3());
818             return;
819         }
820         use(child1);
821         
822         Edge child2 = node->child2();
823         if (!child2) {
824             ASSERT(!node->child3());
825             return;
826         }
827         use(child2);
828         
829         Edge child3 = node->child3();
830         if (!child3)
831             return;
832         use(child3);
833     }
834 }
835
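// At this point the DFG does not emit a real write barrier: the helpers below only skip
// cases that obviously cannot need one (the value is known not to be a cell, or it is an
// already-marked constant cell) and, when WRITE_BARRIER_PROFILING is enabled, bump a
// counter so the would-be barriers can be measured.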
836 void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
837 {
838     UNUSED_PARAM(jit);
839     UNUSED_PARAM(owner);
840     UNUSED_PARAM(scratch1);
841     UNUSED_PARAM(scratch2);
842     UNUSED_PARAM(useKind);
843     ASSERT(owner != scratch1);
844     ASSERT(owner != scratch2);
845     ASSERT(scratch1 != scratch2);
846
847 #if ENABLE(WRITE_BARRIER_PROFILING)
848     JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
849 #endif
850 }
851
852 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
853 {
854     UNUSED_PARAM(ownerGPR);
855     UNUSED_PARAM(valueGPR);
856     UNUSED_PARAM(scratch1);
857     UNUSED_PARAM(scratch2);
858     UNUSED_PARAM(useKind);
859
860     if (isKnownNotCell(valueUse.node()))
861         return;
862
863 #if ENABLE(WRITE_BARRIER_PROFILING)
864     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
865 #endif
866 }
867
868 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
869 {
870     UNUSED_PARAM(ownerGPR);
871     UNUSED_PARAM(value);
872     UNUSED_PARAM(scratch1);
873     UNUSED_PARAM(scratch2);
874     UNUSED_PARAM(useKind);
875     
876     if (Heap::isMarked(value))
877         return;
878
879 #if ENABLE(WRITE_BARRIER_PROFILING)
880     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
881 #endif
882 }
883
884 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
885 {
886     UNUSED_PARAM(owner);
887     UNUSED_PARAM(valueGPR);
888     UNUSED_PARAM(scratch);
889     UNUSED_PARAM(useKind);
890
891     if (isKnownNotCell(valueUse.node()))
892         return;
893
894 #if ENABLE(WRITE_BARRIER_PROFILING)
895     JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
896 #endif
897 }
898
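// compileIn: the 'in' operator. When the property name is a constant string that is an
// identifier, we emit a patchable jump and register an InRecord so a structure-based fast
// path can be patched in later; until then the slow path calls operationInOptimize.
// Otherwise we fall back to a generic call to operationGenericIn.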
899 void SpeculativeJIT::compileIn(Node* node)
900 {
901     SpeculateCellOperand base(this, node->child2());
902     GPRReg baseGPR = base.gpr();
903         
904     if (isConstant(node->child1().node())) {
905         JSString* string =
906             jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
907         if (string && string->tryGetValueImpl()
908             && string->tryGetValueImpl()->isIdentifier()) {
909             GPRTemporary result(this);
910             GPRReg resultGPR = result.gpr();
911
912             use(node->child1());
913                 
914             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
915             
916             OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
917                 jump.m_jump, this, operationInOptimize,
918                 JSValueRegs::payloadOnly(resultGPR), baseGPR,
919                 string->tryGetValueImpl());
920                 
921             m_jit.addIn(InRecord(
922                 node->codeOrigin, jump, slowPath.get(), safeCast<int8_t>(baseGPR),
923                 safeCast<int8_t>(resultGPR), usedRegisters()));
924             addSlowPathGenerator(slowPath.release());
925                 
926             base.use();
927                 
928 #if USE(JSVALUE64)
929             jsValueResult(
930                 resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
931 #else
932             booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
933 #endif
934             return;
935         }
936     }
937         
938     JSValueOperand key(this, node->child1());
939     JSValueRegs regs = key.jsValueRegs();
940         
941     GPRResult result(this);
942     GPRReg resultGPR = result.gpr();
943         
944     base.use();
945     key.use();
946         
947     flushRegisters();
948     callOperation(
949         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
950         baseGPR, regs);
951 #if USE(JSVALUE64)
952     jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
953 #else
954     booleanResult(resultGPR, node, UseChildrenCalledExplicitly);
955 #endif
956 }
957
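// Peephole fusion: if this compare's only use is the Branch that immediately follows it,
// compile the compare and the branch as one unit (nonSpeculativePeepholeBranch) and then
// advance m_indexInBlock/m_currentNode past the branch so it is not compiled again. The
// return value tells the caller whether fusion happened.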
958 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
959 {
960     unsigned branchIndexInBlock = detectPeepHoleBranch();
961     if (branchIndexInBlock != UINT_MAX) {
962         Node* branchNode = m_block->at(branchIndexInBlock);
963
964         ASSERT(node->adjustedRefCount() == 1);
965         
966         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
967     
968         m_indexInBlock = branchIndexInBlock;
969         m_currentNode = branchNode;
970         
971         return true;
972     }
973     
974     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
975     
976     return false;
977 }
978
979 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
980 {
981     unsigned branchIndexInBlock = detectPeepHoleBranch();
982     if (branchIndexInBlock != UINT_MAX) {
983         Node* branchNode = m_block->at(branchIndexInBlock);
984
985         ASSERT(node->adjustedRefCount() == 1);
986         
987         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
988     
989         m_indexInBlock = branchIndexInBlock;
990         m_currentNode = branchNode;
991         
992         return true;
993     }
994     
995     nonSpeculativeNonPeepholeStrictEq(node, invert);
996     
997     return false;
998 }
999
1000 #ifndef NDEBUG
1001 static const char* dataFormatString(DataFormat format)
1002 {
1003     // These values correspond to the DataFormat enum.
1004     const char* strings[] = {
1005         "[  ]",
1006         "[ i]",
1007         "[ d]",
1008         "[ c]",
1009         "Err!",
1010         "Err!",
1011         "Err!",
1012         "Err!",
1013         "[J ]",
1014         "[Ji]",
1015         "[Jd]",
1016         "[Jc]",
1017         "Err!",
1018         "Err!",
1019         "Err!",
1020         "Err!",
1021     };
1022     return strings[format];
1023 }
1024
1025 void SpeculativeJIT::dump(const char* label)
1026 {
1027     if (label)
1028         dataLogF("<%s>\n", label);
1029
1030     dataLogF("  gprs:\n");
1031     m_gprs.dump();
1032     dataLogF("  fprs:\n");
1033     m_fprs.dump();
1034     dataLogF("  VirtualRegisters:\n");
1035     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1036         GenerationInfo& info = m_generationInfo[i];
1037         if (info.alive())
1038             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1039         else
1040             dataLogF("    % 3d:[__][__]", i);
1041         if (info.registerFormat() == DataFormatDouble)
1042             dataLogF(":fpr%d\n", info.fpr());
1043         else if (info.registerFormat() != DataFormatNone
1044 #if USE(JSVALUE32_64)
1045             && !(info.registerFormat() & DataFormatJS)
1046 #endif
1047             ) {
1048             ASSERT(info.gpr() != InvalidGPRReg);
1049             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1050         } else
1051             dataLogF("\n");
1052     }
1053     if (label)
1054         dataLogF("</%s>\n", label);
1055 }
1056 #endif
1057
1058
1059 #if DFG_ENABLE(CONSISTENCY_CHECK)
1060 void SpeculativeJIT::checkConsistency()
1061 {
1062     bool failed = false;
1063
1064     for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1065         if (iter.isLocked()) {
1066             dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
1067             failed = true;
1068         }
1069     }
1070     for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1071         if (iter.isLocked()) {
1072             dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
1073             failed = true;
1074         }
1075     }
1076
1077     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1078         VirtualRegister virtualRegister = (VirtualRegister)i;
1079         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1080         if (!info.alive())
1081             continue;
1082         switch (info.registerFormat()) {
1083         case DataFormatNone:
1084             break;
1085         case DataFormatJS:
1086         case DataFormatJSInt32:
1087         case DataFormatJSDouble:
1088         case DataFormatJSCell:
1089         case DataFormatJSBoolean:
1090 #if USE(JSVALUE32_64)
1091             break;
1092 #endif
1093         case DataFormatInt32:
1094         case DataFormatCell:
1095         case DataFormatBoolean:
1096         case DataFormatStorage: {
1097             GPRReg gpr = info.gpr();
1098             ASSERT(gpr != InvalidGPRReg);
1099             if (m_gprs.name(gpr) != virtualRegister) {
1100                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
1101                 failed = true;
1102             }
1103             break;
1104         }
1105         case DataFormatDouble: {
1106             FPRReg fpr = info.fpr();
1107             ASSERT(fpr != InvalidFPRReg);
1108             if (m_fprs.name(fpr) != virtualRegister) {
1109                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
1110                 failed = true;
1111             }
1112             break;
1113         }
1114         case DataFormatOSRMarker:
1115         case DataFormatDead:
1116         case DataFormatArguments:
1117             RELEASE_ASSERT_NOT_REACHED();
1118             break;
1119         }
1120     }
1121
1122     for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
1123         VirtualRegister virtualRegister = iter.name();
1124         if (virtualRegister == InvalidVirtualRegister)
1125             continue;
1126
1127         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1128 #if USE(JSVALUE64)
1129         if (iter.regID() != info.gpr()) {
1130             dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1131             failed = true;
1132         }
1133 #else
1134         if (!(info.registerFormat() & DataFormatJS)) {
1135             if (iter.regID() != info.gpr()) {
1136                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1137                 failed = true;
1138             }
1139         } else {
1140             if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
1141                 dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1142                 failed = true;
1143             }
1144         }
1145 #endif
1146     }
1147
1148     for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
1149         VirtualRegister virtualRegister = iter.name();
1150         if (virtualRegister == InvalidVirtualRegister)
1151             continue;
1152
1153         GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1154         if (iter.regID() != info.fpr()) {
1155             dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
1156             failed = true;
1157         }
1158     }
1159
1160     if (failed) {
1161         dump();
1162         CRASH();
1163     }
1164 }
1165 #endif
1166
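// GPRTemporary/FPRTemporary are RAII holders for scratch registers. The "reuse"
// constructors try to recycle an operand's register when m_jit->canReuse() allows it,
// avoiding a fresh allocation; otherwise they allocate a new register.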
1167 GPRTemporary::GPRTemporary()
1168     : m_jit(0)
1169     , m_gpr(InvalidGPRReg)
1170 {
1171 }
1172
1173 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1174     : m_jit(jit)
1175     , m_gpr(InvalidGPRReg)
1176 {
1177     m_gpr = m_jit->allocate();
1178 }
1179
1180 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1181     : m_jit(jit)
1182     , m_gpr(InvalidGPRReg)
1183 {
1184     m_gpr = m_jit->allocate(specific);
1185 }
1186
1187 #if USE(JSVALUE32_64)
1188 GPRTemporary::GPRTemporary(
1189     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1190     : m_jit(jit)
1191     , m_gpr(InvalidGPRReg)
1192 {
1193     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1194         m_gpr = m_jit->reuse(op1.gpr(which));
1195     else
1196         m_gpr = m_jit->allocate();
1197 }
1198 #endif // USE(JSVALUE32_64)
1199
1200 void GPRTemporary::adopt(GPRTemporary& other)
1201 {
1202     ASSERT(!m_jit);
1203     ASSERT(m_gpr == InvalidGPRReg);
1204     ASSERT(other.m_jit);
1205     ASSERT(other.m_gpr != InvalidGPRReg);
1206     m_jit = other.m_jit;
1207     m_gpr = other.m_gpr;
1208     other.m_jit = 0;
1209     other.m_gpr = InvalidGPRReg;
1210 }
1211
1212 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1213     : m_jit(jit)
1214     , m_fpr(InvalidFPRReg)
1215 {
1216     m_fpr = m_jit->fprAllocate();
1217 }
1218
1219 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1220     : m_jit(jit)
1221     , m_fpr(InvalidFPRReg)
1222 {
1223     if (m_jit->canReuse(op1.node()))
1224         m_fpr = m_jit->reuse(op1.fpr());
1225     else
1226         m_fpr = m_jit->fprAllocate();
1227 }
1228
1229 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1230     : m_jit(jit)
1231     , m_fpr(InvalidFPRReg)
1232 {
1233     if (m_jit->canReuse(op1.node()))
1234         m_fpr = m_jit->reuse(op1.fpr());
1235     else if (m_jit->canReuse(op2.node()))
1236         m_fpr = m_jit->reuse(op2.fpr());
1237     else
1238         m_fpr = m_jit->fprAllocate();
1239 }
1240
1241 #if USE(JSVALUE32_64)
1242 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1243     : m_jit(jit)
1244     , m_fpr(InvalidFPRReg)
1245 {
1246     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1247         m_fpr = m_jit->reuse(op1.fpr());
1248     else
1249         m_fpr = m_jit->fprAllocate();
1250 }
1251 #endif
1252
1253 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1254 {
1255     BasicBlock* taken = branchNode->takenBlock();
1256     BasicBlock* notTaken = branchNode->notTakenBlock();
1257     
1258     SpeculateDoubleOperand op1(this, node->child1());
1259     SpeculateDoubleOperand op2(this, node->child2());
1260     
1261     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1262     jump(notTaken);
1263 }
1264
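// Fused object-equality compare + branch. Both children are speculated to be objects;
// strings are excluded via a structure check against the VM's string structure. If the
// MasqueradesAsUndefined watchpoint is no longer valid, each operand's structure is also
// loaded and we OSR-exit when its type-info carries the MasqueradesAsUndefined flag; with
// the watchpoint intact that dynamic check is skipped. The comparison itself is then
// plain pointer equality on the two cells.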
1265 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1266 {
1267     BasicBlock* taken = branchNode->takenBlock();
1268     BasicBlock* notTaken = branchNode->notTakenBlock();
1269
1270     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1271     
1272     if (taken == nextBlock()) {
1273         condition = MacroAssembler::NotEqual;
1274         BasicBlock* tmp = taken;
1275         taken = notTaken;
1276         notTaken = tmp;
1277     }
1278
1279     SpeculateCellOperand op1(this, node->child1());
1280     SpeculateCellOperand op2(this, node->child2());
1281     
1282     GPRReg op1GPR = op1.gpr();
1283     GPRReg op2GPR = op2.gpr();
1284     
1285     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1286         speculationWatchpointForMasqueradesAsUndefined();
1287
1288         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1289             speculationCheck(
1290                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
1291                 m_jit.branchPtr(
1292                     MacroAssembler::Equal, 
1293                     MacroAssembler::Address(op1GPR, JSCell::structureOffset()), 
1294                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1295         }
1296         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1297             speculationCheck(
1298                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1299                 m_jit.branchPtr(
1300                     MacroAssembler::Equal, 
1301                     MacroAssembler::Address(op2GPR, JSCell::structureOffset()), 
1302                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1303         }
1304     } else {
1305         GPRTemporary structure(this);
1306         GPRReg structureGPR = structure.gpr();
1307
1308         m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
1309         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1310             speculationCheck(
1311                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1312                 m_jit.branchPtr(
1313                     MacroAssembler::Equal, 
1314                     structureGPR, 
1315                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1316         }
1317         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1318             m_jit.branchTest8(
1319                 MacroAssembler::NonZero, 
1320                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1321                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1322
1323         m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
1324         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1325             speculationCheck(
1326                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1327                 m_jit.branchPtr(
1328                     MacroAssembler::Equal, 
1329                     structureGPR, 
1330                     MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
1331         }
1332         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1333             m_jit.branchTest8(
1334                 MacroAssembler::NonZero, 
1335                 MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), 
1336                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1337     }
1338
1339     branchPtr(condition, op1GPR, op2GPR, taken);
1340     jump(notTaken);
1341 }
1342
1343 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1344 {
1345     BasicBlock* taken = branchNode->takenBlock();
1346     BasicBlock* notTaken = branchNode->notTakenBlock();
1347
1348     // The branch instruction will branch to the taken block.
1349     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1350     if (taken == nextBlock()) {
1351         condition = JITCompiler::invert(condition);
1352         BasicBlock* tmp = taken;
1353         taken = notTaken;
1354         notTaken = tmp;
1355     }
1356
1357     if (isBooleanConstant(node->child1().node())) {
1358         bool imm = valueOfBooleanConstant(node->child1().node());
1359         SpeculateBooleanOperand op2(this, node->child2());
1360         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1361     } else if (isBooleanConstant(node->child2().node())) {
1362         SpeculateBooleanOperand op1(this, node->child1());
1363         bool imm = valueOfBooleanConstant(node->child2().node());
1364         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1365     } else {
1366         SpeculateBooleanOperand op1(this, node->child1());
1367         SpeculateBooleanOperand op2(this, node->child2());
1368         branch32(condition, op1.gpr(), op2.gpr(), taken);
1369     }
1370
1371     jump(notTaken);
1372 }
1373
1374 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1375 {
1376     BasicBlock* taken = branchNode->takenBlock();
1377     BasicBlock* notTaken = branchNode->notTakenBlock();
1378
1379     // The branch instruction will branch to the taken block.
1380     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1381     if (taken == nextBlock()) {
1382         condition = JITCompiler::invert(condition);
1383         BasicBlock* tmp = taken;
1384         taken = notTaken;
1385         notTaken = tmp;
1386     }
1387
1388     if (isInt32Constant(node->child1().node())) {
1389         int32_t imm = valueOfInt32Constant(node->child1().node());
1390         SpeculateInt32Operand op2(this, node->child2());
1391         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1392     } else if (isInt32Constant(node->child2().node())) {
1393         SpeculateInt32Operand op1(this, node->child1());
1394         int32_t imm = valueOfInt32Constant(node->child2().node());
1395         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1396     } else {
1397         SpeculateInt32Operand op1(this, node->child1());
1398         SpeculateInt32Operand op2(this, node->child2());
1399         branch32(condition, op1.gpr(), op2.gpr(), taken);
1400     }
1401
1402     jump(notTaken);
1403 }
1404
1405 // Returns true if the compare is fused with a subsequent branch.
1406 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
1407 {
1408     // Fused compare & branch.
1409     unsigned branchIndexInBlock = detectPeepHoleBranch();
1410     if (branchIndexInBlock != UINT_MAX) {
1411         Node* branchNode = m_block->at(branchIndexInBlock);
1412
1413         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1414         // so there can be no intervening nodes that also reference the compare.
1415         ASSERT(node->adjustedRefCount() == 1);
1416
1417         if (node->isBinaryUseKind(Int32Use))
1418             compilePeepHoleInt32Branch(node, branchNode, condition);
1419         else if (node->isBinaryUseKind(NumberUse))
1420             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1421         else if (node->op() == CompareEq) {
1422             if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1423                 // Use non-peephole comparison, for now.
1424                 return false;
1425             }
1426             if (node->isBinaryUseKind(BooleanUse))
1427                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1428             else if (node->isBinaryUseKind(ObjectUse))
1429                 compilePeepHoleObjectEquality(node, branchNode);
1430             else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
1431                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1432             else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
1433                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1434             else {
1435                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1436                 return true;
1437             }
1438         } else {
1439             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1440             return true;
1441         }
1442
1443         use(node->child1());
1444         use(node->child2());
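        // Point the main loop's cursor at the fused branch; the loop's increment then
        // steps past it, so the branch is not compiled a second time.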
1445         m_indexInBlock = branchIndexInBlock;
1446         m_currentNode = branchNode;
1447         return true;
1448     }
1449     return false;
1450 }
1451
1452 void SpeculativeJIT::noticeOSRBirth(Node* node)
1453 {
1454     if (!node->hasVirtualRegister())
1455         return;
1456     
1457     VirtualRegister virtualRegister = node->virtualRegister();
1458     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1459     
1460     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1461 }
1462
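// MovHint generates no code of its own; it records that, from this point on, the value
// produced by child1 logically backs the given bytecode local, so OSR exit can rebuild
// the local if we bail out later in the block.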
1463 void SpeculativeJIT::compileMovHint(Node* node)
1464 {
1465     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1466     
1467     m_lastSetOperand = node->local();
1468
1469     Node* child = node->child1().node();
1470     noticeOSRBirth(child);
1471     
1472     if (child->op() == UInt32ToNumber)
1473         noticeOSRBirth(child->child1().node());
1474     
1475     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
1476 }
1477
1478 void SpeculativeJIT::compileMovHintAndCheck(Node* node)
1479 {
1480     compileMovHint(node);
1481     speculate(node, node->child1());
1482     noResult(node);
1483 }
1484
1485 void SpeculativeJIT::compileInlineStart(Node* node)
1486 {
1487     InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
1488     int argumentCountIncludingThis = inlineCallFrame->arguments.size();
1489     unsigned argumentPositionStart = node->argumentPositionStart();
1490     CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
1491     for (int i = 0; i < argumentCountIncludingThis; ++i) {
1492         ValueRecovery recovery;
1493         if (codeBlock->isCaptured(argumentToOperand(i)))
1494             recovery = ValueRecovery::alreadyInJSStack();
1495         else {
1496             ArgumentPosition& argumentPosition =
1497                 m_jit.graph().m_argumentPositions[argumentPositionStart + i];
1498             ValueSource valueSource;
1499             switch (argumentPosition.flushFormat()) {
1500             case DeadFlush:
1501             case FlushedJSValue:
1502                 valueSource = ValueSource(ValueInJSStack);
1503                 break;
1504             case FlushedDouble:
1505                 valueSource = ValueSource(DoubleInJSStack);
1506                 break;
1507             case FlushedInt32:
1508                 valueSource = ValueSource(Int32InJSStack);
1509                 break;
1510             case FlushedCell:
1511                 valueSource = ValueSource(CellInJSStack);
1512                 break;
1513             case FlushedBoolean:
1514                 valueSource = ValueSource(BooleanInJSStack);
1515                 break;
1516             }
1517             recovery = computeValueRecoveryFor(valueSource);
1518         }
1519         // The recovery should refer either to something that has already been
1520         // stored into the stack at the right place, or to a constant,
1521         // since the Arguments code isn't smart enough to handle anything else.
1522         // The exception is the this argument, which we don't really need to be
1523         // able to recover.
1524 #if DFG_ENABLE(DEBUG_VERBOSE)
1525         dataLogF("\nRecovery for argument %d: ", i);
1526         recovery.dump(WTF::dataFile());
1527 #endif
1528         inlineCallFrame->arguments[i] = recovery;
1529     }
1530 }
1531
1532 void SpeculativeJIT::bail()
1533 {
1534     m_compileOkay = true;
1535     m_jit.breakpoint();
1536     clearGenerationInfo();
1537 }
1538
1539 void SpeculativeJIT::compileCurrentBlock()
1540 {
1541     ASSERT(m_compileOkay);
1542     
1543     if (!m_block)
1544         return;
1545     
1546     ASSERT(m_block->isReachable);
1547     
1548     if (!m_block->cfaHasVisited) {
1549         // Don't generate code for basic blocks that are unreachable according to CFA.
1550         // But to be sure that nobody has generated a jump to this block, drop in a
1551         // breakpoint here.
1552         m_jit.breakpoint();
1553         return;
1554     }
1555
1556     m_jit.blockHeads()[m_block->index] = m_jit.label();
1557 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
1558     m_jit.breakpoint();
1559 #endif
1560     
1561 #if DFG_ENABLE(DEBUG_VERBOSE)
1562     dataLog("Setting up state for block ", *m_block, ": ");
1563 #endif
1564     
1565     m_stream->appendAndLog(VariableEvent::reset());
1566     
1567     m_jit.jitAssertHasValidCallFrame();
1568
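    // Seed the variable event stream with the locations of the arguments at block entry:
    // they are assumed to have been flushed to the JS stack, which is where OSR exit will
    // look for them until a later event says otherwise.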
1569     ASSERT(m_arguments.size() == m_block->variablesAtHead.numberOfArguments());
1570     for (size_t i = 0; i < m_arguments.size(); ++i) {
1571         ValueSource valueSource = ValueSource(ValueInJSStack);
1572         m_arguments[i] = valueSource;
1573         m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
1574     }
1575     
1576     m_state.reset();
1577     m_state.beginBasicBlock(m_block);
1578     
1579     ASSERT(m_variables.size() == m_block->variablesAtHead.numberOfLocals());
1580     for (size_t i = 0; i < m_variables.size(); ++i) {
1581         Node* node = m_block->variablesAtHead.local(i);
1582         ValueSource valueSource;
1583         if (!node)
1584             valueSource = ValueSource(SourceIsDead);
1585         else if (node->variableAccessData()->isArgumentsAlias())
1586             valueSource = ValueSource(ArgumentsSource);
1587         else if (!node->refCount())
1588             valueSource = ValueSource(SourceIsDead);
1589         else
1590             valueSource = ValueSource::forFlushFormat(node->variableAccessData()->flushFormat());
1591         m_variables[i] = valueSource;
1592         // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
1593         m_stream->appendAndLog(VariableEvent::setLocal(localToOperand(i), valueSource.dataFormat()));
1594     }
1595     
1596     m_lastSetOperand = std::numeric_limits<int>::max();
1597     m_codeOriginForExitTarget = CodeOrigin();
1598     m_codeOriginForExitProfile = CodeOrigin();
1599     
1600 #if DFG_ENABLE(DEBUG_VERBOSE)
1601     dataLogF("\n");
1602 #endif
1603
1604     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1605         m_currentNode = m_block->at(m_indexInBlock);
1606         
1607         // We may have hit a contradiction that the CFA was aware of but that the JIT
1608         // didn't cause directly.
1609         if (!m_state.isValid()) {
1610             bail();
1611             return;
1612         }
1613         
1614         m_canExit = m_currentNode->canExit();
1615         bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
1616         m_jit.setForNode(m_currentNode);
1617         m_codeOriginForExitTarget = m_currentNode->codeOriginForExitTarget;
1618         m_codeOriginForExitProfile = m_currentNode->codeOrigin;
1619         if (!m_currentNode->shouldGenerate()) {
1620 #if DFG_ENABLE(DEBUG_VERBOSE)
1621             dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x     ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1622 #endif
1623             switch (m_currentNode->op()) {
1624             case JSConstant:
1625                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1626                 break;
1627                 
1628             case WeakJSConstant:
1629                 m_jit.addWeakReference(m_currentNode->weakConstant());
1630                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1631                 break;
1632                 
1633             case SetLocal:
1634                 RELEASE_ASSERT_NOT_REACHED();
1635                 break;
1636                 
1637             case MovHint:
1638                 compileMovHint(m_currentNode);
1639                 break;
1640                 
1641             case ZombieHint: {
1642                 m_lastSetOperand = m_currentNode->local();
1643                 m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
1644                 break;
1645             }
1646
1647             default:
1648                 if (belongsInMinifiedGraph(m_currentNode->op()))
1649                     m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1650                 break;
1651             }
1652         } else {
1653             
1654             if (verboseCompilationEnabled()) {
1655                 dataLogF(
1656                     "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1657                     (int)m_currentNode->index(),
1658                     m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
1659 #if DFG_ENABLE(DEBUG_VERBOSE)
1660                 dataLog("   ");
1661 #else
1662                 dataLog("\n");
1663 #endif
1664             }
1665 #if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
1666             m_jit.breakpoint();
1667 #endif
1668 #if DFG_ENABLE(XOR_DEBUG_AID)
1669             m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1670             m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
1671 #endif
1672             checkConsistency();
1673             
1674             m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
1675             
1676             compile(m_currentNode);
1677             if (!m_compileOkay) {
1678                 bail();
1679                 return;
1680             }
1681             
1682             if (belongsInMinifiedGraph(m_currentNode->op())) {
1683                 m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1684                 noticeOSRBirth(m_currentNode);
1685             }
1686             
1687 #if DFG_ENABLE(DEBUG_VERBOSE)
1688             if (m_currentNode->hasResult()) {
1689                 GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
1690                 dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
1691                 if (info.registerFormat() != DataFormatNone) {
1692                     if (info.registerFormat() == DataFormatDouble)
1693                         dataLogF(", %s", FPRInfo::debugName(info.fpr()));
1694 #if USE(JSVALUE32_64)
1695                     else if (info.registerFormat() & DataFormatJS)
1696                         dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
1697 #endif
1698                     else
1699                         dataLogF(", %s", GPRInfo::debugName(info.gpr()));
1700                 }
1701                 dataLogF("    ");
1702             } else
1703                 dataLogF("    ");
1704 #endif
1705         }
1706         
1707 #if DFG_ENABLE(DEBUG_VERBOSE)
1708         dataLogF("\n");
1709 #endif
1710         
1711         // Make sure that the abstract state is rematerialized for the next node.
1712         if (shouldExecuteEffects)
1713             m_interpreter.executeEffects(m_indexInBlock);
1714         
1715         if (m_currentNode->shouldGenerate())
1716             checkConsistency();
1717     }
1718     
1719     // Perform the most basic verification that children have been used correctly.
1720 #if !ASSERT_DISABLED
1721     for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1722         GenerationInfo& info = m_generationInfo[index];
1723         ASSERT(!info.alive());
1724     }
1725 #endif
1726 }
1727
1728 // If we are making type predictions about our arguments then
1729 // we need to check that they are correct on function entry.
1730 void SpeculativeJIT::checkArgumentTypes()
1731 {
1732     ASSERT(!m_currentNode);
1733     m_isCheckingArgumentTypes = true;
1734     m_speculationDirection = BackwardSpeculation;
1735     m_codeOriginForExitTarget = CodeOrigin(0);
1736     m_codeOriginForExitProfile = CodeOrigin(0);
1737
1738     for (size_t i = 0; i < m_arguments.size(); ++i)
1739         m_arguments[i] = ValueSource(ValueInJSStack);
1740     for (size_t i = 0; i < m_variables.size(); ++i)
1741         m_variables[i] = ValueSource(ValueInJSStack);
1742     
1743     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1744         Node* node = m_jit.graph().m_arguments[i];
1745         ASSERT(node->op() == SetArgument);
1746         if (!node->shouldGenerate()) {
1747             // The argument is dead. We don't do any checks for such arguments.
1748             continue;
1749         }
1750         
1751         VariableAccessData* variableAccessData = node->variableAccessData();
1752         FlushFormat format = variableAccessData->flushFormat();
1753         
1754         if (format == FlushedJSValue)
1755             continue;
1756         
1757         VirtualRegister virtualRegister = variableAccessData->local();
1758
1759         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1760         
1761 #if USE(JSVALUE64)
1762         switch (format) {
1763         case FlushedInt32: {
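            // On JSVALUE64, an int32 is boxed by OR-ing in the TagTypeNumber bits, so any
            // encoded value numerically below tagTypeNumberRegister cannot be an int32.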
1764             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1765             break;
1766         }
1767         case FlushedBoolean: {
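            // The two legal boolean encodings are ValueFalse and ValueTrue; xor-ing with
            // ValueFalse maps them to 0 and 1, so any bit set outside the low bit means the
            // value was not a boolean.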
1768             GPRTemporary temp(this);
1769             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1770             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1771             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1772             break;
1773         }
1774         case FlushedCell: {
1775             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1776             break;
1777         }
1778         default:
1779             RELEASE_ASSERT_NOT_REACHED();
1780             break;
1781         }
1782 #else
1783         switch (format) {
1784         case FlushedInt32: {
1785             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1786             break;
1787         }
1788         case FlushedBoolean: {
1789             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1790             break;
1791         }
1792         case FlushedCell: {
1793             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1794             break;
1795         }
1796         default:
1797             RELEASE_ASSERT_NOT_REACHED();
1798             break;
1799         }
1800 #endif
1801     }
1802     m_isCheckingArgumentTypes = false;
1803 }
1804
1805 bool SpeculativeJIT::compile()
1806 {
1807     checkArgumentTypes();
1808
1809     ASSERT(!m_currentNode);
1810     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1811         m_jit.setForBlockIndex(blockIndex);
1812         m_block = m_jit.graph().block(blockIndex);
1813         compileCurrentBlock();
1814     }
1815     linkBranches();
1816     return true;
1817 }
1818
1819 void SpeculativeJIT::createOSREntries()
1820 {
1821     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1822         BasicBlock* block = m_jit.graph().block(blockIndex);
1823         if (!block)
1824             continue;
1825         if (!block->isOSRTarget)
1826             continue;
1827
1828         // Currently we don't have OSR entry trampolines. We could add them
1829         // here if need be.
1830         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1831     }
1832 }
1833
1834 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1835 {
1836     unsigned osrEntryIndex = 0;
1837     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1838         BasicBlock* block = m_jit.graph().block(blockIndex);
1839         if (!block)
1840             continue;
1841         if (!block->isOSRTarget)
1842             continue;
1843         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1844     }
1845     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1846 }
1847
1848 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
1849 {
1850     if (valueSource.isInJSStack())
1851         return valueSource.valueRecovery();
1852         
1853     ASSERT(valueSource.kind() == HaveNode);
1854     Node* node = valueSource.id().node(m_jit.graph());
1855     if (isConstant(node))
1856         return ValueRecovery::constant(valueOfJSConstant(node));
1857     
1858     return ValueRecovery();
1859 }
1860
1861 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1862 {
1863     Edge child3 = m_jit.graph().varArgChild(node, 2);
1864     Edge child4 = m_jit.graph().varArgChild(node, 3);
1865
1866     ArrayMode arrayMode = node->arrayMode();
1867     
1868     GPRReg baseReg = base.gpr();
1869     GPRReg propertyReg = property.gpr();
1870     
1871     SpeculateDoubleOperand value(this, child3);
1872
1873     FPRReg valueReg = value.fpr();
1874     
1875     DFG_TYPE_CHECK(
1876         JSValueRegs(), child3, SpecRealNumber,
1877         m_jit.branchDouble(
1878             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1879     
1880     if (!m_compileOkay)
1881         return;
1882     
1883     StorageOperand storage(this, child4);
1884     GPRReg storageReg = storage.gpr();
1885
1886     if (node->op() == PutByValAlias) {
1887         // Store the value to the array.
1888         GPRReg propertyReg = property.gpr();
1889         FPRReg valueReg = value.fpr();
1890         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1891         
1892         noResult(m_currentNode);
1893         return;
1894     }
1895     
1896     GPRTemporary temporary;
1897     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1898
1899     MacroAssembler::Jump slowCase;
1900     
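    // In-bounds array modes simply exit on any store past publicLength. Otherwise, a store
    // between publicLength and vectorLength bumps publicLength in place, and a store past
    // vectorLength either exits (when out-of-bounds stores are not expected) or is handled
    // by the slow path call registered below.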
1901     if (arrayMode.isInBounds()) {
1902         speculationCheck(
1903             StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
1904             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1905     } else {
1906         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1907         
1908         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1909         
1910         if (!arrayMode.isOutOfBounds())
1911             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1912         
1913         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1914         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1915         
1916         inBounds.link(&m_jit);
1917     }
1918     
1919     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1920
1921     base.use();
1922     property.use();
1923     value.use();
1924     storage.use();
1925     
1926     if (arrayMode.isOutOfBounds()) {
1927         addSlowPathGenerator(
1928             slowPathCall(
1929                 slowCase, this,
1930                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1931                 NoResult, baseReg, propertyReg, valueReg));
1932     }
1933
1934     noResult(m_currentNode, UseChildrenCalledExplicitly);
1935 }
1936
1937 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1938 {
1939     SpeculateCellOperand string(this, node->child1());
1940     SpeculateStrictInt32Operand index(this, node->child2());
1941     StorageOperand storage(this, node->child3());
1942
1943     GPRReg stringReg = string.gpr();
1944     GPRReg indexReg = index.gpr();
1945     GPRReg storageReg = storage.gpr();
1946     
1947     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1948
1949     // unsigned comparison so we can filter out negative indices and indices that are too large
1950     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1951
1952     GPRTemporary scratch(this);
1953     GPRReg scratchReg = scratch.gpr();
1954
1955     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1956
1957     // Load the character into scratchReg
1958     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1959
1960     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1961     JITCompiler::Jump cont8Bit = m_jit.jump();
1962
1963     is16Bit.link(&m_jit);
1964
1965     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1966
1967     cont8Bit.link(&m_jit);
1968
1969     int32Result(scratchReg, m_currentNode);
1970 }
1971
1972 void SpeculativeJIT::compileGetByValOnString(Node* node)
1973 {
1974     SpeculateCellOperand base(this, node->child1());
1975     SpeculateStrictInt32Operand property(this, node->child2());
1976     StorageOperand storage(this, node->child3());
1977     GPRReg baseReg = base.gpr();
1978     GPRReg propertyReg = property.gpr();
1979     GPRReg storageReg = storage.gpr();
1980
1981     GPRTemporary scratch(this);
1982     GPRReg scratchReg = scratch.gpr();
1983 #if USE(JSVALUE32_64)
1984     GPRTemporary resultTag;
1985     GPRReg resultTagReg = InvalidGPRReg;
1986     if (node->arrayMode().isOutOfBounds()) {
1987         GPRTemporary realResultTag(this);
1988         resultTag.adopt(realResultTag);
1989         resultTagReg = resultTag.gpr();
1990     }
1991 #endif
1992
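    // An out-of-bounds read can only be turned into jsUndefined cheaply while the
    // String.prototype/Object.prototype chain has no indexed properties, so watchpoints on
    // both prototypes' structures invalidate this assumption if either structure transitions.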
1993     if (node->arrayMode().isOutOfBounds()) {
1994         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
1995         if (globalObject->stringPrototypeChainIsSane()) {
1996             m_jit.addLazily(
1997                 speculationWatchpoint(),
1998                 globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1999             m_jit.addLazily(
2000                 speculationWatchpoint(),
2001                 globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2002         }
2003     }
2004
2005     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2006
2007     // unsigned comparison so we can filter out negative indices and indices that are too large
2008     JITCompiler::Jump outOfBounds = m_jit.branch32(
2009         MacroAssembler::AboveOrEqual, propertyReg,
2010         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2011     if (node->arrayMode().isInBounds())
2012         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2013
2014     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2015
2016     // Load the character into scratchReg
2017     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2018
2019     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2020     JITCompiler::Jump cont8Bit = m_jit.jump();
2021
2022     is16Bit.link(&m_jit);
2023
2024     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2025
2026     JITCompiler::Jump bigCharacter =
2027         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2028
2029     // 8 bit string values don't need the isASCII check.
2030     cont8Bit.link(&m_jit);
2031
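    // The character code in scratchReg indexes the VM's single-character string table to
    // produce the result JSString; codes of 0x100 or more branched to the
    // operationSingleCharacterString slow path registered below.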
2032 #if CPU(X86)
2033     // We don't have enough registers, so construct our own indexed address and load.
2034     m_jit.lshift32(MacroAssembler::TrustedImm32(2), scratchReg);
2035     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2036     m_jit.loadPtr(scratchReg, scratchReg);
2037 #else
2038     GPRTemporary smallStrings(this);
2039     GPRReg smallStringsReg = smallStrings.gpr();
2040     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2041     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
2042 #endif
2043
2044     addSlowPathGenerator(
2045         slowPathCall(
2046             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2047
2048     if (node->arrayMode().isOutOfBounds()) {
2049 #if USE(JSVALUE32_64)
2050         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2051 #endif
2052
2053         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin);
2054         if (globalObject->stringPrototypeChainIsSane()) {
2055 #if USE(JSVALUE64)
2056             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2057                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
2058 #else
2059             addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
2060                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2061                 baseReg, propertyReg)));
2062 #endif
2063         } else {
2064 #if USE(JSVALUE64)
2065             addSlowPathGenerator(
2066                 slowPathCall(
2067                     outOfBounds, this, operationGetByValStringInt,
2068                     scratchReg, baseReg, propertyReg));
2069 #else
2070             addSlowPathGenerator(
2071                 slowPathCall(
2072                     outOfBounds, this, operationGetByValStringInt,
2073                     resultTagReg, scratchReg, baseReg, propertyReg));
2074 #endif
2075         }
2076         
2077 #if USE(JSVALUE64)
2078         jsValueResult(scratchReg, m_currentNode);
2079 #else
2080         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2081 #endif
2082     } else
2083         cellResult(scratchReg, m_currentNode);
2084 }
2085
2086 void SpeculativeJIT::compileFromCharCode(Node* node)
2087 {
2088     SpeculateStrictInt32Operand property(this, node->child1());
2089     GPRReg propertyReg = property.gpr();
2090     GPRTemporary smallStrings(this);
2091     GPRTemporary scratch(this);
2092     GPRReg scratchReg = scratch.gpr();
2093     GPRReg smallStringsReg = smallStrings.gpr();
2094
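    // Character codes below 0xff are served from the single-character string cache; a code
    // that is too large, or a cache slot that has not been materialized yet (null), falls
    // back to operationStringFromCharCode.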
2095     JITCompiler::JumpList slowCases;
2096     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2097     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2098     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2099
2100     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2101     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2102     cellResult(scratchReg, m_currentNode);
2103 }
2104
2105 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2106 {
2107 #if DFG_ENABLE(DEBUG_VERBOSE)
2108     dataLogF("checkGeneratedTypeForToInt32@%d   ", node->index());
2109 #endif
2110     VirtualRegister virtualRegister = node->virtualRegister();
2111     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2112
2113     switch (info.registerFormat()) {
2114     case DataFormatStorage:
2115         RELEASE_ASSERT_NOT_REACHED();
2116
2117     case DataFormatBoolean:
2118     case DataFormatCell:
2119         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2120         return GeneratedOperandTypeUnknown;
2121
2122     case DataFormatNone:
2123     case DataFormatJSCell:
2124     case DataFormatJS:
2125     case DataFormatJSBoolean:
2126         return GeneratedOperandJSValue;
2127
2128     case DataFormatJSInt32:
2129     case DataFormatInt32:
2130         return GeneratedOperandInteger;
2131
2132     case DataFormatJSDouble:
2133     case DataFormatDouble:
2134         return GeneratedOperandDouble;
2135         
2136     default:
2137         RELEASE_ASSERT_NOT_REACHED();
2138         return GeneratedOperandTypeUnknown;
2139     }
2140 }
2141
2142 void SpeculativeJIT::compileValueToInt32(Node* node)
2143 {
2144     switch (node->child1().useKind()) {
2145     case Int32Use: {
2146         SpeculateInt32Operand op1(this, node->child1());
2147         GPRTemporary result(this, Reuse, op1);
2148         m_jit.move(op1.gpr(), result.gpr());
2149         int32Result(result.gpr(), node, op1.format());
2150         return;
2151     }
2152     
2153     case NumberUse:
2154     case NotCellUse: {
2155         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2156         case GeneratedOperandInteger: {
2157             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2158             GPRTemporary result(this, Reuse, op1);
2159             m_jit.move(op1.gpr(), result.gpr());
2160             int32Result(result.gpr(), node, op1.format());
2161             return;
2162         }
2163         case GeneratedOperandDouble: {
2164             GPRTemporary result(this);
2165             SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
2166             FPRReg fpr = op1.fpr();
2167             GPRReg gpr = result.gpr();
2168             JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2169             
2170             addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
2171
2172             int32Result(gpr, node);
2173             return;
2174         }
2175         case GeneratedOperandJSValue: {
2176             GPRTemporary result(this);
2177 #if USE(JSVALUE64)
2178             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2179
2180             GPRReg gpr = op1.gpr();
2181             GPRReg resultGpr = result.gpr();
2182             FPRTemporary tempFpr(this);
2183             FPRReg fpr = tempFpr.fpr();
2184
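            // Values at or above TagTypeNumber are immediate int32s; doubles, cells and the
            // other immediates all compare below it and fall through to the slower paths.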
2185             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2186             JITCompiler::JumpList converted;
2187
2188             if (node->child1().useKind() == NumberUse) {
2189                 DFG_TYPE_CHECK(
2190                     JSValueRegs(gpr), node->child1(), SpecNumber,
2191                     m_jit.branchTest64(
2192                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2193             } else {
2194                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2195                 
2196                 DFG_TYPE_CHECK(
2197                     JSValueRegs(gpr), node->child1(), ~SpecCell,
2198                     m_jit.branchTest64(
2199                         JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
2200                 
2201                 // It's not a cell: so true turns into 1 and all else turns into 0.
2202                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2203                 converted.append(m_jit.jump());
2204                 
2205                 isNumber.link(&m_jit);
2206             }
2207
2208             // First, if we get here we have a double encoded as a JSValue
2209             m_jit.move(gpr, resultGpr);
2210             unboxDouble(resultGpr, fpr);
2211
2212             silentSpillAllRegisters(resultGpr);
2213             callOperation(toInt32, resultGpr, fpr);
2214             silentFillAllRegisters(resultGpr);
2215
2216             converted.append(m_jit.jump());
2217
2218             isInteger.link(&m_jit);
2219             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2220
2221             converted.link(&m_jit);
2222 #else
2223             Node* childNode = node->child1().node();
2224             VirtualRegister virtualRegister = childNode->virtualRegister();
2225             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2226
2227             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2228
2229             GPRReg payloadGPR = op1.payloadGPR();
2230             GPRReg resultGpr = result.gpr();
2231         
2232             JITCompiler::JumpList converted;
2233
2234             if (info.registerFormat() == DataFormatJSInt32)
2235                 m_jit.move(payloadGPR, resultGpr);
2236             else {
2237                 GPRReg tagGPR = op1.tagGPR();
2238                 FPRTemporary tempFpr(this);
2239                 FPRReg fpr = tempFpr.fpr();
2240                 FPRTemporary scratch(this);
2241
2242                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2243
2244                 if (node->child1().useKind() == NumberUse) {
2245                     DFG_TYPE_CHECK(
2246                         JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecNumber,
2247                         m_jit.branch32(
2248                             MacroAssembler::AboveOrEqual, tagGPR,
2249                             TrustedImm32(JSValue::LowestTag)));
2250                 } else {
2251                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2252                     
2253                     DFG_TYPE_CHECK(
2254                         JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
2255                         m_jit.branch32(
2256                             JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
2257                     
2258                     // It's not a cell: so true turns into 1 and all else turns into 0.
2259                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2260                     m_jit.move(TrustedImm32(0), resultGpr);
2261                     converted.append(m_jit.jump());
2262                     
2263                     isBoolean.link(&m_jit);
2264                     m_jit.move(payloadGPR, resultGpr);
2265                     converted.append(m_jit.jump());
2266                     
2267                     isNumber.link(&m_jit);
2268                 }
2269
2270                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2271
2272                 silentSpillAllRegisters(resultGpr);
2273                 callOperation(toInt32, resultGpr, fpr);
2274                 silentFillAllRegisters(resultGpr);
2275
2276                 converted.append(m_jit.jump());
2277
2278                 isInteger.link(&m_jit);
2279                 m_jit.move(payloadGPR, resultGpr);
2280
2281                 converted.link(&m_jit);
2282             }
2283 #endif
2284             int32Result(resultGpr, node);
2285             return;
2286         }
2287         case GeneratedOperandTypeUnknown:
2288             RELEASE_ASSERT(!m_compileOkay);
2289             return;
2290         }
2291         RELEASE_ASSERT_NOT_REACHED();
2292         return;
2293     }
2294     
2295     case BooleanUse: {
2296         SpeculateBooleanOperand op1(this, node->child1());
2297         GPRTemporary result(this, Reuse, op1);
2298         
2299         m_jit.move(op1.gpr(), result.gpr());
2300         m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
2301         
2302         int32Result(result.gpr(), node);
2303         return;
2304     }
2305
2306     default:
2307         ASSERT(!m_compileOkay);
2308         return;
2309     }
2310 }
2311
2312 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2313 {
2314     if (!nodeCanSpeculateInt32(node->arithNodeFlags())) {
2315         // We know that this sometimes produces doubles. So produce a double every
2316         // time. This at least allows subsequent code to not have weird conditionals.
2317             
2318         SpeculateInt32Operand op1(this, node->child1());
2319         FPRTemporary result(this);
2320             
2321         GPRReg inputGPR = op1.gpr();
2322         FPRReg outputFPR = result.fpr();
2323             
2324         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2325             
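        // The operand is really a uint32; the signed int32-to-double conversion above is off
        // by 2^32 whenever the sign bit is set, so add 2^32 back in that case.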
2326         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2327         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2328         positive.link(&m_jit);
2329             
2330         doubleResult(outputFPR, node);
2331         return;
2332     }
2333
2334     SpeculateInt32Operand op1(this, node->child1());
2335     GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2336
2337     m_jit.move(op1.gpr(), result.gpr());
2338
2339     // Test the operand is positive. This is a very special speculation check - we actually
2340     // use roll-forward speculation here, where if this fails, we jump to the baseline
2341     // instruction that follows us, rather than the one we're executing right now. We have
2342     // to do this because by this point, the original values necessary to compile whatever
2343     // operation the UInt32ToNumber originated from might be dead.
2344     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
2345
2346     int32Result(result.gpr(), node, op1.format());
2347 }
2348
2349 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2350 {
2351     SpeculateDoubleOperand op1(this, node->child1());
2352     FPRTemporary scratch(this);
2353     GPRTemporary result(this);
2354     
2355     FPRReg valueFPR = op1.fpr();
2356     FPRReg scratchFPR = scratch.fpr();
2357     GPRReg resultGPR = result.gpr();
2358
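    // Converting -0.0 to the int32 0 would lose the sign, so -0.0 counts as a conversion
    // failure unless profiling showed the bytecode does not care about negative zero.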
2359     JITCompiler::JumpList failureCases;
2360     bool negZeroCheck = !bytecodeCanIgnoreNegativeZero(node->arithNodeFlags());
2361     m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
2362     forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
2363
2364     int32Result(resultGPR, node);
2365 }
2366
2367 void SpeculativeJIT::compileInt32ToDouble(Node* node)
2368 {
2369     ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
2370     
2371     if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
2372         SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2373         FPRTemporary result(this);
2374         m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2375         doubleResult(result.fpr(), node);
2376         return;
2377     }
2378     
2379     JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2380     FPRTemporary result(this);
2381     
2382 #if USE(JSVALUE64)
2383     GPRTemporary temp(this);
2384
2385     GPRReg op1GPR = op1.gpr();
2386     GPRReg tempGPR = temp.gpr();
2387     FPRReg resultFPR = result.fpr();
2388     
2389     JITCompiler::Jump isInteger = m_jit.branch64(
2390         MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2391     
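    // A forward-exiting node must resume at the next bytecode on failure, so the still-boxed
    // input is handed to the exit machinery as an explicit value recovery; a backward exit
    // simply re-executes the originating bytecode and needs no recovery.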
2392     if (needsTypeCheck(node->child1(), SpecNumber)) {
2393         if (node->flags() & NodeExitsForward) {
2394             forwardTypeCheck(
2395                 JSValueRegs(op1GPR), node->child1(), SpecNumber,
2396                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
2397                 ValueRecovery::inGPR(op1GPR, DataFormatJS));
2398         } else {
2399             backwardTypeCheck(
2400                 JSValueRegs(op1GPR), node->child1(), SpecNumber,
2401                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2402         }
2403     }
2404     
2405     m_jit.move(op1GPR, tempGPR);
2406     unboxDouble(tempGPR, resultFPR);
2407     JITCompiler::Jump done = m_jit.jump();
2408     
2409     isInteger.link(&m_jit);
2410     m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2411     done.link(&m_jit);
2412 #else
2413     FPRTemporary temp(this);
2414     
2415     GPRReg op1TagGPR = op1.tagGPR();
2416     GPRReg op1PayloadGPR = op1.payloadGPR();
2417     FPRReg tempFPR = temp.fpr();
2418     FPRReg resultFPR = result.fpr();
2419     
2420     JITCompiler::Jump isInteger = m_jit.branch32(
2421         MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2422     
2423     if (needsTypeCheck(node->child1(), SpecNumber)) {
2424         if (node->flags() & NodeExitsForward) {
2425             forwardTypeCheck(
2426                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
2427                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
2428                 ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
2429         } else {
2430             backwardTypeCheck(
2431                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
2432                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2433         }
2434     }
2435     
2436     unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2437     JITCompiler::Jump done = m_jit.jump();
2438     
2439     isInteger.link(&m_jit);
2440     m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2441     done.link(&m_jit);
2442 #endif
2443     
2444     doubleResult(resultFPR, node);
2445 }
2446
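// Implements round-half-up into [0, 255] (the caller truncates the returned double):
// e.g. 254.6 -> 255, 2.5 -> 3, -3.2 -> 0, and NaN, which fails the d > 0 test, also
// becomes 0.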
2447 static double clampDoubleToByte(double d)
2448 {
2449     d += 0.5;
2450     if (!(d > 0))
2451         d = 0;
2452     else if (d > 255)
2453         d = 255;
2454     return d;
2455 }
2456
2457 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2458 {
2459     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2460     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2461     jit.xorPtr(result, result);
2462     MacroAssembler::Jump clamped = jit.jump();
2463     tooBig.link(&jit);
2464     jit.move(JITCompiler::TrustedImm32(255), result);
2465     clamped.link(&jit);
2466     inBounds.link(&jit);
2467 }
2468
2469 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2470 {
2471     // Unordered compare so we pick up NaN
2472     static const double zero = 0;
2473     static const double byteMax = 255;
2474     static const double half = 0.5;
2475     jit.loadDouble(&zero, scratch);
2476     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2477     jit.loadDouble(&byteMax, scratch);
2478     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2479     
2480     jit.loadDouble(&half, scratch);
2481     // FIXME: This should probably just use a floating point round!
2482     // https://bugs.webkit.org/show_bug.cgi?id=72054
2483     jit.addDouble(source, scratch);
2484     jit.truncateDoubleToInt32(scratch, result);   
2485     MacroAssembler::Jump truncatedInt = jit.jump();
2486     
2487     tooSmall.link(&jit);
2488     jit.xorPtr(result, result);
2489     MacroAssembler::Jump zeroed = jit.jump();
2490     
2491     tooBig.link(&jit);
2492     jit.move(JITCompiler::TrustedImm32(255), result);
2493     
2494     truncatedInt.link(&jit);
2495     zeroed.link(&jit);
2496
2497 }
2498
2499 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2500 {
2501     ASSERT(isInt(type));
2502     
2503     SpeculateCellOperand base(this, node->child1());
2504     SpeculateStrictInt32Operand property(this, node->child2());
2505     StorageOperand storage(this, node->child3());
2506
2507     GPRReg baseReg = base.gpr();
2508     GPRReg propertyReg = property.gpr();
2509     GPRReg storageReg = storage.gpr();
2510
2511     GPRTemporary result(this);
2512     GPRReg resultReg = result.gpr();
2513
2514     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2515
2516     speculationCheck(
2517         Uncountable, JSValueRegs(), 0,
2518         m_jit.branch32(
2519             MacroAssembler::AboveOrEqual, propertyReg,
2520             MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
2521     switch (elementSize(type)) {
2522     case 1:
2523         if (isSigned(type))
2524             m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2525         else
2526             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2527         break;
2528     case 2:
2529         if (isSigned(type))
2530             m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2531         else
2532             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2533         break;
2534     case 4:
2535         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2536         break;
2537     default:
2538         CRASH();
2539     }
2540     if (elementSize(type) < 4 || isSigned(type)) {
2541         int32Result(resultReg, node);
2542         return;
2543     }
2544     
2545     ASSERT(elementSize(type) == 4 && !isSigned(type));
2546     if (node->shouldSpeculateInt32()) {
2547         forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
2548         int32Result(resultReg, node);
2549         return;
2550     }
2551     
2552     FPRTemporary fresult(this);
2553     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
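    // Same uint32 fix-up as in compileUInt32ToNumber: if the loaded value has its sign bit
    // set, the signed conversion above produced value - 2^32, so add 2^32 back.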
2554     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2555     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2556     positive.link(&m_jit);
2557     doubleResult(fresult.fpr(), node);
2558 }
2559
2560 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2561 {
2562     ASSERT(isInt(type));
2563     
2564     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2565     GPRReg storageReg = storage.gpr();
2566     
2567     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2568     
2569     GPRTemporary value;
2570     GPRReg valueGPR = InvalidGPRReg;
2571     
2572     if (valueUse->isConstant()) {
2573         JSValue jsValue = valueOfJSConstant(valueUse.node());
2574         if (!jsValue.isNumber()) {
2575             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2576             noResult(node);
2577             return;
2578         }
2579         double d = jsValue.asNumber();
2580         if (isClamped(type)) {
2581             ASSERT(elementSize(type) == 1);
2582             d = clampDoubleToByte(d);
2583         }
2584         GPRTemporary scratch(this);
2585         GPRReg scratchReg = scratch.gpr();
2586         m_jit.move(Imm32(toInt32(d)), scratchReg);
2587         value.adopt(scratch);
2588         valueGPR = scratchReg;
2589     } else {
2590         switch (valueUse.useKind()) {
2591         case Int32Use: {
2592             SpeculateInt32Operand valueOp(this, valueUse);
2593             GPRTemporary scratch(this);
2594             GPRReg scratchReg = scratch.gpr();
2595             m_jit.move(valueOp.gpr(), scratchReg);
2596             if (isClamped(type)) {
2597                 ASSERT(elementSize(type) == 1);
2598                 compileClampIntegerToByte(m_jit, scratchReg);
2599             }
2600             value.adopt(scratch);
2601             valueGPR = scratchReg;
2602             break;
2603         }
2604             
2605         case NumberUse: {
2606             if (isClamped(type)) {
2607                 ASSERT(elementSize(type) == 1);
2608                 SpeculateDoubleOperand valueOp(this, valueUse);
2609                 GPRTemporary result(this);
2610                 FPRTemporary floatScratch(this);
2611                 FPRReg fpr = valueOp.fpr();
2612                 GPRReg gpr = result.gpr();
2613                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2614                 value.adopt(result);
2615                 valueGPR = gpr;
2616             } else {
2617                 SpeculateDoubleOperand valueOp(this, valueUse);
2618                 GPRTemporary result(this);
2619                 FPRReg fpr = valueOp.fpr();
2620                 GPRReg gpr = result.gpr();
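                // NaN stores as 0, matching ToInt32/ToUint32 semantics; finite values are
                // truncated inline, with a fall-back call to toInt32 if the fast truncation
                // fails.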
2621                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2622                 m_jit.xorPtr(gpr, gpr);
2623                 MacroAssembler::Jump fixed = m_jit.jump();
2624                 notNaN.link(&m_jit);
2625                 
2626                 MacroAssembler::Jump failed;
2627                 if (isSigned(type))
2628                     failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2629                 else
2630                     failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2631                 
2632                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
2633                 
2634                 fixed.link(&m_jit);
2635                 value.adopt(result);
2636                 valueGPR = gpr;
2637             }
2638             break;
2639         }
2640             
2641         default:
2642             RELEASE_ASSERT_NOT_REACHED();
2643             break;
2644         }
2645     }
2646     
2647     ASSERT_UNUSED(valueGPR, valueGPR != property);
2648     ASSERT(valueGPR != base);
2649     ASSERT(valueGPR != storageReg);
2650     MacroAssembler::Jump outOfBounds;
2651     if (node->op() == PutByVal)
2652         outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2653
2654     switch (elementSize(type)) {
2655     case 1:
2656         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2657         break;
2658     case 2:
2659         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2660         break;
2661     case 4:
2662         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2663         break;
2664     default:
2665         CRASH();
2666     }
2667     if (node->op() == PutByVal)
2668         outOfBounds.link(&m_jit);
2669     noResult(node);
2670 }
2671
2672 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2673 {
2674     ASSERT(isFloat(type));
2675     
2676     SpeculateCellOperand base(this, node->child1());
2677     SpeculateStrictInt32Operand property(this, node->child2());
2678     StorageOperand storage(this, node->child3());
2679
2680     GPRReg baseReg = base.gpr();
2681     GPRReg propertyReg = property.gpr();
2682     GPRReg storageReg = storage.gpr();
2683
2684     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2685
2686     FPRTemporary result(this);
2687     FPRReg resultReg = result.fpr();
2688     speculationCheck(
2689         Uncountable, JSValueRegs(), 0,
2690         m_jit.branch32(
2691             MacroAssembler::AboveOrEqual, propertyReg,
2692             MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength())));
2693     switch (elementSize(type)) {
2694     case 4:
2695         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2696         m_jit.convertFloatToDouble(resultReg, resultReg);
2697         break;
2698     case 8: {
2699         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2700         break;
2701     }
2702     default:
2703         RELEASE_ASSERT_NOT_REACHED();
2704     }
2705     
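    // Purify any NaN read out of the array into the canonical quiet NaN so its bit pattern
    // cannot collide with the boxed JSValue encoding.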
2706     MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
2707     static const double NaN = QNaN;
2708     m_jit.loadDouble(&NaN, resultReg);
2709     notNaN.link(&m_jit);
2710     
2711     doubleResult(resultReg, node);
2712 }
2713
2714 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2715 {
2716     ASSERT(isFloat(type));
2717     
2718     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2719     GPRReg storageReg = storage.gpr();
2720     
2721     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2722     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2723
2724     SpeculateDoubleOperand valueOp(this, valueUse);
2725     FPRTemporary scratch(this);
2726     FPRReg valueFPR = valueOp.fpr();
2727     FPRReg scratchFPR = scratch.fpr();
2728
2729     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2730     
2731     MacroAssembler::Jump outOfBounds;
2732     if (node->op() == PutByVal) {
2733         outOfBounds = m_jit.branch32(
2734             MacroAssembler::AboveOrEqual, property,
2735             MacroAssembler::Address(base, JSArrayBufferView::offsetOfLength()));
2736     }
2737     
2738     switch (elementSize(type)) {
2739     case 4: {
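             // Narrow the incoming double to a single-precision float in the scratch register
             // before storing it into the Float32 array.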
2740         m_jit.moveDouble(valueFPR, scratchFPR);
2741         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2742         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2743         break;
2744     }
2745     case 8:
2746         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2747         break;
2748     default:
2749         RELEASE_ASSERT_NOT_REACHED();
2750     }
2751     if (node->op() == PutByVal)
2752         outOfBounds.link(&m_jit);
2753     noResult(node);
2754 }
2755
2756 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
2757 {
2758     // Check that prototype is an object.
2759     m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
2760     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
2761     
2762     // Initialize scratchReg with the value being checked.
2763     m_jit.move(valueReg, scratchReg);
2764     
2765     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2766     MacroAssembler::Label loop(&m_jit);
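         // Each iteration loads the current object's prototype and stops when it either matches
         // prototypeReg (instanceof is true) or is no longer a cell, i.e. the chain ended in
         // null (instanceof is false).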
2767     m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
2768 #if USE(JSVALUE64)
2769     m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
2770     MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
2771     m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
2772 #else
2773     m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
2774     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2775     m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2776 #endif
2777     
2778     // No match - result is false.
2779 #if USE(JSVALUE64)
2780     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2781 #else
2782     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2783 #endif
2784     MacroAssembler::Jump putResult = m_jit.jump();
2785     
2786     isInstance.link(&m_jit);
2787 #if USE(JSVALUE64)
2788     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2789 #else
2790     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2791 #endif
2792     
2793     putResult.link(&m_jit);
2794 }
2795
2796 void SpeculativeJIT::compileInstanceOf(Node* node)
2797 {
2798     if (node->child1().useKind() == UntypedUse) {
2799         // It might not be a cell. Speculate less aggressively.
2800         // Or: it might only be used once (i.e. by us), so we get zero benefit
2801         // from speculating any more aggressively than we absolutely need to.
2802         
2803         JSValueOperand value(this, node->child1());
2804         SpeculateCellOperand prototype(this, node->child2());
2805         GPRTemporary scratch(this);
2806         
2807         GPRReg prototypeReg = prototype.gpr();
2808         GPRReg scratchReg = scratch.gpr();
2809         
2810 #if USE(JSVALUE64)
2811         GPRReg valueReg = value.gpr();
2812         MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
2813         m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2814 #else
2815         GPRReg valueTagReg = value.tagGPR();
2816         GPRReg valueReg = value.payloadGPR();
2817         MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
2818         m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2819 #endif
2820
2821         MacroAssembler::Jump done = m_jit.jump();
2822         
2823         isCell.link(&m_jit);
2824         
2825         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2826         
2827         done.link(&m_jit);
2828
2829 #if USE(JSVALUE64)
2830         jsValueResult(scratchReg, node, DataFormatJSBoolean);
2831 #else
2832         booleanResult(scratchReg, node);
2833 #endif
2834         return;
2835     }
2836     
2837     SpeculateCellOperand value(this, node->child1());
2838     SpeculateCellOperand prototype(this, node->child2());
2839     
2840     GPRTemporary scratch(this);
2841     
2842     GPRReg valueReg = value.gpr();
2843     GPRReg prototypeReg = prototype.gpr();
2844     GPRReg scratchReg = scratch.gpr();
2845     
2846     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
2847
2848 #if USE(JSVALUE64)
2849     jsValueResult(scratchReg, node, DataFormatJSBoolean);
2850 #else
2851     booleanResult(scratchReg, node);
2852 #endif
2853 }
2854
2855 void SpeculativeJIT::compileAdd(Node* node)
2856 {
2857     switch (node->binaryUseKind()) {
2858     case Int32Use: {
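             // If the bytecode only observes the result as a truncated int32, a plain wrapping
             // add32 suffices; otherwise emit an overflow-checking add and OSR exit on overflow.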
2859         if (isNumberConstant(node->child1().node())) {
2860             int32_t imm1 = valueOfInt32Constant(node->child1().node());
2861             SpeculateInt32Operand op2(this, node->child2());
2862             GPRTemporary result(this);
2863
2864             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2865                 m_jit.move(op2.gpr(), result.gpr());
2866                 m_jit.add32(Imm32(imm1), result.gpr());
2867             } else
2868                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
2869
2870             int32Result(result.gpr(), node);
2871             return;
2872         }
2873                 
2874         if (isNumberConstant(node->child2().node())) {
2875             SpeculateInt32Operand op1(this, node->child1());
2876             int32_t imm2 = valueOfInt32Constant(node->child2().node());
2877             GPRTemporary result(this);
2878                 
2879             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2880                 m_jit.move(op1.gpr(), result.gpr());
2881                 m_jit.add32(Imm32(imm2), result.gpr());
2882             } else
2883                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
2884
2885             int32Result(result.gpr(), node);
2886             return;
2887         }
2888                 
2889         SpeculateInt32Operand op1(this, node->child1());
2890         SpeculateInt32Operand op2(this, node->child2());
2891         GPRTemporary result(this, Reuse, op1, op2);
2892
2893         GPRReg gpr1 = op1.gpr();
2894         GPRReg gpr2 = op2.gpr();
2895         GPRReg gprResult = result.gpr();
2896
2897         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
2898             if (gpr1 == gprResult)
2899                 m_jit.add32(gpr2, gprResult);
2900             else {
2901                 m_jit.move(gpr2, gprResult);
2902                 m_jit.add32(gpr1, gprResult);
2903             }
2904         } else {
2905             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
2906                 
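                 // The add was performed in place, so if the result register aliases one of the
                 // operands the OSR exit ramp must undo it (SpeculationRecovery) to reconstruct
                 // that operand's original value.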
2907             if (gpr1 == gprResult)
2908                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
2909             else if (gpr2 == gprResult)
2910                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
2911             else
2912                 speculationCheck(Overflow, JSValueRegs(), 0, check);
2913         }
2914
2915         int32Result(gprResult, node);
2916         return;
2917     }
2918     
2919     case NumberUse: {
2920         SpeculateDoubleOperand op1(this, node->child1());
2921         SpeculateDoubleOperand op2(this, node->child2());
2922         FPRTemporary result(this, op1, op2);
2923
2924         FPRReg reg1 = op1.fpr();
2925         FPRReg reg2 = op2.fpr();
2926         m_jit.addDouble(reg1, reg2, result.fpr());
2927
2928         doubleResult(result.fpr(), node);
2929         return;
2930     }
2931         
2932     case UntypedUse: {
2933         RELEASE_ASSERT(node->op() == ValueAdd);
2934         compileValueAdd(node);
2935         return;
2936     }
2937         
2938     default:
2939         RELEASE_ASSERT_NOT_REACHED();
2940         break;
2941     }
2942 }
2943
2944 void SpeculativeJIT::compileMakeRope(Node* node)
2945 {
2946     ASSERT(node->child1().useKind() == KnownStringUse);
2947     ASSERT(node->child2().useKind() == KnownStringUse);
2948     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
2949     
2950     SpeculateCellOperand op1(this, node->child1());
2951     SpeculateCellOperand op2(this, node->child2());
2952     SpeculateCellOperand op3(this, node->child3());
2953     GPRTemporary result(this);
2954     GPRTemporary allocator(this);
2955     GPRTemporary scratch(this);
2956     
2957     GPRReg opGPRs[3];
2958     unsigned numOpGPRs;
2959     opGPRs[0] = op1.gpr();
2960     opGPRs[1] = op2.gpr();
2961     if (node->child3()) {
2962         opGPRs[2] = op3.gpr();
2963         numOpGPRs = 3;
2964     } else {
2965         opGPRs[2] = InvalidGPRReg;
2966         numOpGPRs = 2;
2967     }
2968     GPRReg resultGPR = result.gpr();
2969     GPRReg allocatorGPR = allocator.gpr();
2970     GPRReg scratchGPR = scratch.gpr();
2971     
2972     JITCompiler::JumpList slowPath;
2973     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
2974     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
2975     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
2976         
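         // Storing null into the value slot marks this string as a rope (its contents are not
         // yet flattened); the fibers hold the pieces, and unused fiber slots are cleared below.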
2977     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
2978     for (unsigned i = 0; i < numOpGPRs; ++i)
2979         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
2980     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
2981         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
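         // Compute the rope's flags and length: it is 8-bit only if every fiber is 8-bit (AND of
         // the flags), and its length is the sum of the fiber lengths. allocatorGPR is reused as
         // the length accumulator now that the allocator pointer is no longer needed.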
2982     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
2983     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
2984     for (unsigned i = 1; i < numOpGPRs; ++i) {
2985         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
2986         m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
2987     }
2988     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
2989     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
2990     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
2991     
2992     switch (numOpGPRs) {
2993     case 2:
2994         addSlowPathGenerator(slowPathCall(
2995             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
2996         break;
2997     case 3:
2998         addSlowPathGenerator(slowPathCall(
2999             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3000         break;
3001     default:
3002         RELEASE_ASSERT_NOT_REACHED();
3003         break;
3004     }
3005         
3006     cellResult(resultGPR, node);
3007 }
3008
3009 void SpeculativeJIT::compileArithSub(Node* node)
3010 {
3011     switch (node->binaryUseKind()) {
3012     case Int32Use: {
3013         if (isNumberConstant(node->child2().node())) {
3014             SpeculateInt32Operand op1(this, node->child1());
3015             int32_t imm2 = valueOfInt32Constant(node->child2().node());
3016             GPRTemporary result(this);
3017
3018             if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3019                 m_jit.move(op1.gpr(), result.gpr());
3020                 m_jit.sub32(Imm32(imm2), result.gpr());
3021             } else {
3022 #if ENABLE(JIT_CONSTANT_BLINDING)
3023                 GPRTemporary scratch(this);
3024                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3025 #else
3026                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
3027 #endif
3028             }
3029
3030             int32Result(result.gpr(), node);
3031             return;
3032         }
3033             
3034         if (isNumberConstant(node->child1().node())) {
3035             int32_t imm1 = valueOfInt32Constant(node->child1().node());
3036             SpeculateInt32Operand op2(this, node->child2());
3037             GPRTemporary result(this);
3038                 
3039             m_jit.move(Imm32(imm1), result.gpr());
3040             if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3041                 m_jit.sub32(op2.gpr(), result.gpr());
3042             else
3043                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3044                 
3045             int32Result(result.gpr(), node);
3046             return;
3047         }
3048             
3049         SpeculateInt32Operand op1(this, node->child1());
3050         SpeculateInt32Operand op2(this, node->child2());
3051         GPRTemporary result(this);
3052
3053         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3054             m_jit.move(op1.gpr(), result.gpr());
3055             m_jit.sub32(op2.gpr(), result.gpr());
3056         } else
3057             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3058
3059         int32Result(result.gpr(), node);
3060         return;
3061     }
3062         
3063     case NumberUse: {
3064         SpeculateDoubleOperand op1(this, node->child1());
3065         SpeculateDoubleOperand op2(this, node->child2());
3066         FPRTemporary result(this, op1);
3067
3068         FPRReg reg1 = op1.fpr();
3069         FPRReg reg2 = op2.fpr();
3070         m_jit.subDouble(reg1, reg2, result.fpr());
3071
3072         doubleResult(result.fpr(), node);
3073         return;
3074     }
3075         
3076     default:
3077         RELEASE_ASSERT_NOT_REACHED();
3078         return;
3079     }
3080 }
3081
3082 void SpeculativeJIT::compileArithNegate(Node* node)
3083 {
3084     switch (node->child1().useKind()) {
3085     case Int32Use: {
3086         SpeculateInt32Operand op1(this, node->child1());
3087         GPRTemporary result(this);
3088
3089         m_jit.move(op1.gpr(), result.gpr());
3090
3091         if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
3092             m_jit.neg32(result.gpr());
3093         else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
3094             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3095         else {
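                 // If all bits below the sign bit are zero the value is either 0 or INT32_MIN:
                 // negating 0 would produce -0 (not representable as an int32) and negating
                 // INT32_MIN overflows, so bail out in both cases.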
3096             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3097             m_jit.neg32(result.gpr());
3098         }
3099
3100         int32Result(result.gpr(), node);
3101         return;
3102     }
3103         
3104     case NumberUse: {
3105         SpeculateDoubleOperand op1(this, node->child1());
3106         FPRTemporary result(this);
3107         
3108         m_jit.negateDouble(op1.fpr(), result.fpr());
3109         
3110         doubleResult(result.fpr(), node);
3111         return;
3112     }
3113         
3114     default:
3115         RELEASE_ASSERT_NOT_REACHED();
3116         return;
3117     }
3118 }

3119 void SpeculativeJIT::compileArithIMul(Node* node)
3120 {
3121     SpeculateInt32Operand op1(this, node->child1());
3122     SpeculateInt32Operand op2(this, node->child2());
3123     GPRTemporary result(this);
3124
3125     GPRReg reg1 = op1.gpr();
3126     GPRReg reg2 = op2.gpr();
3127
3128     m_jit.move(reg1, result.gpr());
3129     m_jit.mul32(reg2, result.gpr());
3130     int32Result(result.gpr(), node);
3131     return;
3132 }
3133
3134 void SpeculativeJIT::compileArithMul(Node* node)
3135 {
3136     switch (node->binaryUseKind()) {
3137     case Int32Use: {
3138         SpeculateInt32Operand op1(this, node->child1());
3139         SpeculateInt32Operand op2(this, node->child2());
3140         GPRTemporary result(this);
3141
3142         GPRReg reg1 = op1.gpr();
3143         GPRReg reg2 = op2.gpr();
3144
3145         // We can perform truncated multiplications if we get to this point, because if the
3146         // fixup phase could not prove that it would be safe, it would have turned us into
3147         // a double multiplication.
3148         if (bytecodeCanTruncateInteger(node->arithNodeFlags())) {
3149             m_jit.move(reg1, result.gpr());
3150             m_jit.mul32(reg2, result.gpr());
3151         } else {
3152             speculationCheck(
3153                 Overflow, JSValueRegs(), 0,
3154                 m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
3155         }
3156             
3157         // Check for negative zero, if the users of this node care about such things.
3158         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
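                 // A zero result is only -0 if one of the operands was negative, so bail out
                 // when the result is zero and either operand is below zero.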
3159             MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
3160             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
3161             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
3162             resultNonZero.link(&m_jit);
3163         }
3164
3165         int32Result(result.gpr(), node);
3166         return;
3167     }
3168         
3169     case NumberUse: {
3170         SpeculateDoubleOperand op1(this, node->child1());
3171         SpeculateDoubleOperand op2(this, node->child2());
3172         FPRTemporary result(this, op1, op2);
3173         
3174         FPRReg reg1 = op1.fpr();
3175         FPRReg reg2 = op2.fpr();
3176         
3177         m_jit.mulDouble(reg1, reg2, result.fpr());
3178         
3179         doubleResult(result.fpr(), node);
3180         return;
3181     }
3182         
3183     default:
3184         RELEASE_ASSERT_NOT_REACHED();
3185         return;
3186     }
3187 }
3188
3189 void SpeculativeJIT::compileArithDiv(Node* node)
3190 {
3191     switch (node->binaryUseKind()) {
3192     case Int32Use: {
3193 #if CPU(X86) || CPU(X86_64)
3194         SpeculateInt32Operand op1(this, node->child1());
3195         SpeculateInt32Operand op2(this, node->child2());
3196         GPRTemporary eax(this, X86Registers::eax);
3197         GPRTemporary edx(this, X86Registers::edx);
3198         GPRReg op1GPR = op1.gpr();
3199         GPRReg op2GPR = op2.gpr();
3200     
3201         GPRReg op2TempGPR;
3202         GPRReg temp;
3203         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3204             op2TempGPR = allocate();
3205             temp = op2TempGPR;
3206         } else {
3207             op2TempGPR = InvalidGPRReg;
3208             if (op1GPR == X86Registers::eax)
3209                 temp = X86Registers::edx;
3210             else
3211                 temp = X86Registers::eax;
3212         }
3213     
3214         ASSERT(temp != op1GPR);
3215         ASSERT(temp != op2GPR);
3216     
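             // Detect the two dangerous denominators (0 and -1) with a single unsigned compare:
             // op2 + 1 is above 1 exactly when op2 is neither 0 nor -1, i.e. when idiv cannot
             // fault or overflow.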
3217         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3218     
3219         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3220     
3221         JITCompiler::JumpList done;
3222         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3223             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3224             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3225         } else {
3226             // This is the case where we convert the result to an int after we're done, and we
3227             // already know that the denominator is either -1 or 0. So, if the denominator is
3228             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3229             // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3230             // are happy to fall through to a normal division, since we're just dividing
3231             // something by negative 1.
3232         
3233             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3234             m_jit.move(TrustedImm32(0), eax.gpr());
3235             done.append(m_jit.jump());
3236         
3237             notZero.link(&m_jit);
3238             JITCompiler::Jump notNeg2ToThe31 =
3239                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3240             m_jit.move(op1GPR, eax.gpr());
3241             done.append(m_jit.jump());
3242         
3243             notNeg2ToThe31.link(&m_jit);
3244         }
3245     
3246         safeDenominator.link(&m_jit);
3247     
3248         // If the user cares about negative zero, then speculate that we're not about
3249         // to produce negative zero.
3250         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3251             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3252             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3253             numeratorNonZero.link(&m_jit);
3254         }
3255     
3256         if (op2TempGPR != InvalidGPRReg) {
3257             m_jit.move(op2GPR, op2TempGPR);
3258             op2GPR = op2TempGPR;
3259         }
3260             
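             // idiv takes its dividend in edx:eax: cdq sign-extends eax into edx, and idivl
             // leaves the quotient in eax and the remainder in edx.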
3261         m_jit.move(op1GPR, eax.gpr());
3262         m_jit.assembler().cdq();
3263         m_jit.assembler().idivl_r(op2GPR);
3264             
3265         if (op2TempGPR != InvalidGPRReg)
3266             unlock(op2TempGPR);
3267
3268         // Check that there was no remainder. If there had been, then we'd be obligated to
3269         // produce a double result instead.
3270         if (bytecodeUsesAsNumber(node->arithNodeFlags()))
3271             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
3272         
3273         done.link(&m_jit);
3274         int32Result(eax.gpr(), node);
3275 #elif CPU(APPLE_ARMV7S)
3276         SpeculateInt32Operand op1(this, node->child1());
3277         SpeculateInt32Operand op2(this, node->child2());
3278         GPRReg op1GPR = op1.gpr();
3279         GPRReg op2GPR = op2.gpr();
3280         GPRTemporary quotient(this);
3281         GPRTemporary multiplyAnswer(this);
3282
3283         // If the user cares about negative zero, then speculate that we're not about
3284         // to produce negative zero.
3285         if (!bytecodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3286             MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
3287             speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
3288             numeratorNonZero.link(&m_jit);
3289         }
3290
3291         m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
3292
3293         // Check that there was no remainder. If there had been, then we'd be obligated to
3294         // produce a double result instead.
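             // sdiv truncates toward zero, so re-multiply the quotient by the divisor and compare
             // against the dividend; a mismatch (or an overflowing multiply) means there was a
             // remainder.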
3295         if (bytecodeUsesAsNumber(node->arithNodeFlags())) {
3296             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
3297             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
3298         }
3299
3300         int32Result(quotient.gpr(), node);
3301 #else
3302         RELEASE_ASSERT_NOT_REACHED();
3303 #endif
3304         break;
3305     }