[x86] moveDoubleToInts() does not clobber its source register anymore
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGSaneStringGetByValSlowPathGenerator.h"
38 #include "DFGSlowPathGenerator.h"
39 #include "LinkBuffer.h"
40 #include "JSCInlines.h"
41 #include "ScratchRegisterAllocator.h"
42 #include "WriteBarrierBuffer.h"
43 #include <wtf/MathExtras.h>
44
45 namespace JSC { namespace DFG {
46
// Sets up per-compilation state: one GenerationInfo slot per frame register,
// the abstract interpreter over the graph's state, and the variable event
// stream / minified graph that are used to build OSR exit data.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}
61
// All members clean up via their own destructors; nothing explicit to do.
SpeculativeJIT::~SpeculativeJIT()
{
}
65
// Inline-allocates a JSArray of the given structure with vector capacity for
// at least |numElements| (never less than BASE_VECTOR_LEN). On the fast path
// resultGPR ends up holding the new array and storageGPR its butterfly; any
// allocation failure branches to a slow path that calls
// operationNewArrayWithSize instead.
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    // Allocate indexing header + element vector in one chunk, then rewind the
    // pointer so storageGPR points at the first element rather than the end.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
    // For double-shape arrays, fill the unused tail of the vector with PNaN
    // (the bit pattern stored here for uninitialized double slots).
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}
110
// Inline-allocates an Arguments object for the current code origin and
// initializes all of its fields from the current call frame; allocation
// failure branches to |slowPath|. Clobbers scratchGPR1 and scratchGPR2.
void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
    Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
    emitAllocateDestructibleObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR2, slowPath);

    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));

    // The frame's ArgumentCount includes |this|, so subtract one to get the
    // number of actual arguments.
    m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
    m_jit.sub32(TrustedImm32(1), scratchGPR1);
    m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));

    m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
    if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
        m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));

    // The arguments initially alias the live call frame registers; the
    // register array and slow argument data are only materialized later.
    m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisterArray()));
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));

    m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
    m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
}
133
// Records an OSR exit that fires when |jumpToFail| is taken at runtime.
// No-op once compilation has already been marked as failed.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
142
// As above, but the exit fires when any jump in |jumpsToFail| is taken.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
151
// Records an OSR exit with no jumps attached yet and returns a placeholder
// (the exit's index in the osrExit list) so the caller can link jumps to it
// later. Returns an empty placeholder if compilation already failed.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}
162
163 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
164 {
165     ASSERT(m_isCheckingArgumentTypes || m_canExit);
166     return speculationCheck(kind, jsValueSource, nodeUse.node());
167 }
168
169 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
170 {
171     ASSERT(m_isCheckingArgumentTypes || m_canExit);
172     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
173 }
174
175 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
176 {
177     ASSERT(m_isCheckingArgumentTypes || m_canExit);
178     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
179 }
180
// Records an OSR exit with an attached SpeculationRecovery, which the exit
// compiler runs to undo partially-committed work before exiting.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}
190
191 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
192 {
193     ASSERT(m_isCheckingArgumentTypes || m_canExit);
194     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
195 }
196
// Plants an invalidation point: an OSR exit with no jumps, reachable only by
// later patching the code at the watchpoint label recorded here (e.g. when a
// watchpoint the compiled code relies on fires).
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
211
// Emits an unconditional OSR exit and marks the remainder of this code path
// as dead; once m_compileOkay is cleared, subsequent speculationCheck calls
// become no-ops.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    // Record the exit before clearing the flag — speculationCheck would bail
    // out if m_compileOkay were already false.
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}
222
223 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
224 {
225     ASSERT(m_isCheckingArgumentTypes || m_canExit);
226     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
227 }
228
// Records a BadType OSR exit for |edge| taken via |jumpToFail|, and narrows
// the abstract interpreter's knowledge of the edge to |typesPassedThrough|
// so downstream checks can be elided.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}
235
236 RegisterSet SpeculativeJIT::usedRegisters()
237 {
238     RegisterSet result;
239     
240     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
241         GPRReg gpr = GPRInfo::toRegister(i);
242         if (m_gprs.isInUse(gpr))
243             result.set(gpr);
244     }
245     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
246         FPRReg fpr = FPRInfo::toRegister(i);
247         if (m_fprs.isInUse(fpr))
248             result.set(fpr);
249     }
250     
251     result.merge(RegisterSet::specialRegisters());
252     
253     return result;
254 }
255
// Takes ownership of a slow path generator; the slow path code itself is
// emitted later by runSlowPathGenerators().
void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}
260
261 void SpeculativeJIT::runSlowPathGenerators()
262 {
263     for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
264         m_slowPathGenerators[i]->generate(this);
265 }
266
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code),
// which the raw libc fmod symbol would not satisfy — hence the wrapper there too.
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
277
278 void SpeculativeJIT::clearGenerationInfo()
279 {
280     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
281         m_generationInfo[i] = GenerationInfo();
282     m_gprs = RegisterBank<GPRInfo>();
283     m_fprs = RegisterBank<FPRInfo>();
284 }
285
// Computes how to spill (before a call) and refill (after it) the GPR
// |source|, which currently holds the value for virtual register |spillMe|,
// without mutating any recorded register-allocation state — hence "silent".
// The returned plan is executed later by silentSpill()/silentFill().
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    // Pick the spill action: the store width depends on the register format.
    // Values that are already spilled (or don't need spilling) store nothing.
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        // On 32-bit, a JS value spans two GPRs; spill whichever half |source| is.
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    // Pick the fill action: constants are rematerialized rather than reloaded;
    // everything else is loaded back from the stack slot, with shifts to
    // convert between Int52 representations where the spill format differs.
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // Booleans are never held unboxed in a GPR on 64-bit.
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
435     
// FPR counterpart of silentSavePlanForGPR: computes spill/fill actions for
// the double held in |source| for virtual register |spillMe|, without
// mutating any recorded register-allocation state.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
    // Constants are rematerialized on fill; everything else is reloaded from
    // the stack slot.
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
473     
// Emits the store described by |plan|'s spill action, writing the register's
// value into the node's stack slot. Counterpart of silentFill().
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
500     
// Emits the load or constant rematerialization described by |plan|'s fill
// action, restoring the register's value after a call. |canTrample| is a
// scratch GPR; it is only used on JSVALUE64 to materialize the bit pattern
// of a double constant before moving it into the FPR.
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Materialize the raw bits in the scratch GPR, then move to the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Reload the int32 payload and re-box it as a JSValue.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as Int52 (shifted); convert to strict Int52 by shifting down.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as strict Int52; convert to Int52 by shifting up.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
599     
// Given tempGPR holding a cell's indexing-type byte, returns a jump that is
// taken when the indexing type does not match |arrayMode|/|shape|. Clobbers
// tempGPR. Which bits are compared depends on whether the mode requires the
// IsArray bit to be set, clear, or either.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // OriginalArray modes are checked by structure, never by indexing type.
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }
        
    case Array::Array:
        // Must be an array with the given shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        // Must be a non-array with the given shape (IsArray bit must be clear).
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        // Shape must match; the IsArray bit is ignored.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}
630
// As above, but for a full ArrayMode: returns the list of jumps taken when
// the indexing type in tempGPR is unacceptable for |arrayMode|. Clobbers
// tempGPR. The ArrayStorage cases accept a range of shapes (ArrayStorage
// through SlowPutArrayStorage) via a subtract-and-compare-above trick.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Require IsArray, then require the shape to be within
                // [ArrayStorageShape, SlowPutArrayStorageShape].
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Require exactly an array with ArrayStorage shape.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Non-array (or possibly-array) cases: only the shape bits matter.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}
687
// Compiles a CheckArray node: verifies at runtime that the base cell's
// indexing type (or cell type, for Arguments/typed arrays) matches the
// node's ArrayMode, exiting via OSR on mismatch. Skips the check entirely
// when the abstract state already proves it.
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Check the indexing-type byte against the mode.
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ArgumentsType);

        noResult(m_currentNode);
        return;
    default:
        // Remaining modes are typed arrays; check the cell type.
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }
    
    // NOTE(review): this tail appears unreachable — every switch case above
    // returns or crashes, and expectedClassInfo is never set to non-null, so
    // the RELEASE_ASSERT would fire if control ever got here. Candidate for
    // removal; kept as-is in this comment-only pass.
    RELEASE_ASSERT(expectedClassInfo);
    
    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}
749
// Compiles Arrayify/ArrayifyToStructure: checks whether the base object in
// |baseReg| already has the wanted structure (ArrayifyToStructure) or
// indexing type (Arrayify), and if not, converts it on a slow path.
// |propertyReg| may be InvalidGPRReg when the node has no index child.
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    // Only plain Arrayify needs an extra register for the structure lookup on
    // the slow path; adopt one lazily so ArrayifyToStructure doesn't pay for it.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
    
    noResult(m_currentNode);
}
785
786 void SpeculativeJIT::arrayify(Node* node)
787 {
788     ASSERT(node->arrayMode().isSpecific());
789     
790     SpeculateCellOperand base(this, node->child1());
791     
792     if (!node->child2()) {
793         arrayify(node, base.gpr(), InvalidGPRReg);
794         return;
795     }
796     
797     SpeculateInt32Operand property(this, node->child2());
798     
799     arrayify(node, base.gpr(), property.gpr());
800 }
801
802 GPRReg SpeculativeJIT::fillStorage(Edge edge)
803 {
804     VirtualRegister virtualRegister = edge->virtualRegister();
805     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
806     
807     switch (info.registerFormat()) {
808     case DataFormatNone: {
809         if (info.spillFormat() == DataFormatStorage) {
810             GPRReg gpr = allocate();
811             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
812             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
813             info.fillStorage(*m_stream, gpr);
814             return gpr;
815         }
816         
817         // Must be a cell; fill it as a cell and then return the pointer.
818         return fillSpeculateCell(edge);
819     }
820         
821     case DataFormatStorage: {
822         GPRReg gpr = info.gpr();
823         m_gprs.lock(gpr);
824         return gpr;
825     }
826         
827     default:
828         return fillSpeculateCell(edge);
829     }
830 }
831
832 void SpeculativeJIT::useChildren(Node* node)
833 {
834     if (node->flags() & NodeHasVarArgs) {
835         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
836             if (!!m_jit.graph().m_varArgChildren[childIdx])
837                 use(m_jit.graph().m_varArgChildren[childIdx]);
838         }
839     } else {
840         Edge child1 = node->child1();
841         if (!child1) {
842             ASSERT(!node->child2() && !node->child3());
843             return;
844         }
845         use(child1);
846         
847         Edge child2 = node->child2();
848         if (!child2) {
849             ASSERT(!node->child3());
850             return;
851         }
852         use(child2);
853         
854         Edge child3 = node->child3();
855         if (!child3)
856             return;
857         use(child3);
858     }
859 }
860
// Compiles the 'In' node: tests whether child1 (the property key) is present
// on child2 (the base, speculated to be a cell).
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();
    
    // Fast path: when the key is a constant atomic string, emit a patchable
    // jump recorded in an InRecord so it can be repatched later, with
    // operationInOptimize as the initial slow-path target.
    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
            
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());
            
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();
            
            OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                string->tryGetValueImpl());
            
            // Record, in the stub info, which registers hold the base and the
            // result and which registers are live across the patchable site.
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;
            
            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(slowPath.release());
                
            base.use();
            
            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }
        
    // Generic path: flush and call out to operationGenericIn with the boxed key.
    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();
        
    GPRResult result(this);
    GPRReg resultGPR = result.gpr();
        
    base.use();
    key.use();
        
    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
914
915 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
916 {
917     unsigned branchIndexInBlock = detectPeepHoleBranch();
918     if (branchIndexInBlock != UINT_MAX) {
919         Node* branchNode = m_block->at(branchIndexInBlock);
920
921         ASSERT(node->adjustedRefCount() == 1);
922         
923         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
924     
925         m_indexInBlock = branchIndexInBlock;
926         m_currentNode = branchNode;
927         
928         return true;
929     }
930     
931     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
932     
933     return false;
934 }
935
936 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
937 {
938     unsigned branchIndexInBlock = detectPeepHoleBranch();
939     if (branchIndexInBlock != UINT_MAX) {
940         Node* branchNode = m_block->at(branchIndexInBlock);
941
942         ASSERT(node->adjustedRefCount() == 1);
943         
944         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
945     
946         m_indexInBlock = branchIndexInBlock;
947         m_currentNode = branchNode;
948         
949         return true;
950     }
951     
952     nonSpeculativeNonPeepholeStrictEq(node, invert);
953     
954     return false;
955 }
956
957 static const char* dataFormatString(DataFormat format)
958 {
959     // These values correspond to the DataFormat enum.
960     const char* strings[] = {
961         "[  ]",
962         "[ i]",
963         "[ d]",
964         "[ c]",
965         "Err!",
966         "Err!",
967         "Err!",
968         "Err!",
969         "[J ]",
970         "[Ji]",
971         "[Jd]",
972         "[Jc]",
973         "Err!",
974         "Err!",
975         "Err!",
976         "Err!",
977     };
978     return strings[format];
979 }
980
// Debug helper: dumps the state of both register banks and the generation
// info of every virtual register, optionally wrapped in <label>...</label>.
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Live registers print their register format and spill format tags;
        // dead ones print a fixed placeholder.
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        // Append the concrete machine register when the value lives in one.
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1012
// Default-constructs an empty temporary that owns no register; a register can
// be transferred in later via adopt().
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}
1018
1019 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1020     : m_jit(jit)
1021     , m_gpr(InvalidGPRReg)
1022 {
1023     m_gpr = m_jit->allocate();
1024 }
1025
1026 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1027     : m_jit(jit)
1028     , m_gpr(InvalidGPRReg)
1029 {
1030     m_gpr = m_jit->allocate(specific);
1031 }
1032
1033 #if USE(JSVALUE32_64)
1034 GPRTemporary::GPRTemporary(
1035     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1036     : m_jit(jit)
1037     , m_gpr(InvalidGPRReg)
1038 {
1039     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1040         m_gpr = m_jit->reuse(op1.gpr(which));
1041     else
1042         m_gpr = m_jit->allocate();
1043 }
1044 #endif // USE(JSVALUE32_64)
1045
// Default constructor: holds no registers.
JSValueRegsTemporary::JSValueRegsTemporary() { }
1047
// Allocates the register(s) needed to hold a JSValue: a single GPR on 64-bit
// targets, a tag/payload GPR pair on 32-bit targets.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1057
// The member GPRTemporaries release their registers on destruction.
JSValueRegsTemporary::~JSValueRegsTemporary() { }
1059
// Returns the held register(s) wrapped as a JSValueRegs.
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1068
// Transfers ownership of another temporary's register into this (currently
// empty) temporary, disarming the donor so it no longer refers to the
// register.
void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit); // This temporary must not already hold a register.
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit); // The donor must hold a live register.
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}
1080
1081 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1082     : m_jit(jit)
1083     , m_fpr(InvalidFPRReg)
1084 {
1085     m_fpr = m_jit->fprAllocate();
1086 }
1087
1088 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1089     : m_jit(jit)
1090     , m_fpr(InvalidFPRReg)
1091 {
1092     if (m_jit->canReuse(op1.node()))
1093         m_fpr = m_jit->reuse(op1.fpr());
1094     else
1095         m_fpr = m_jit->fprAllocate();
1096 }
1097
1098 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1099     : m_jit(jit)
1100     , m_fpr(InvalidFPRReg)
1101 {
1102     if (m_jit->canReuse(op1.node()))
1103         m_fpr = m_jit->reuse(op1.fpr());
1104     else if (m_jit->canReuse(op2.node()))
1105         m_fpr = m_jit->reuse(op2.fpr());
1106     else
1107         m_fpr = m_jit->fprAllocate();
1108 }
1109
1110 #if USE(JSVALUE32_64)
1111 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1112     : m_jit(jit)
1113     , m_fpr(InvalidFPRReg)
1114 {
1115     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1116         m_fpr = m_jit->reuse(op1.fpr());
1117     else
1118         m_fpr = m_jit->fprAllocate();
1119 }
1120 #endif
1121
1122 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1123 {
1124     BasicBlock* taken = branchNode->branchData()->taken.block;
1125     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1126     
1127     SpeculateDoubleOperand op1(this, node->child1());
1128     SpeculateDoubleOperand op2(this, node->child2());
1129     
1130     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1131     jump(notTaken);
1132 }
1133
// Emits a fused compare-and-branch for object equality. Both children are
// speculated to be objects; how much checking that requires depends on
// whether the masquerades-as-undefined watchpoint is still intact.
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
    
    // If the taken block is the fall-through block, branch on inequality to
    // the not-taken block instead so the taken case falls through.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Watchpoint still valid: it suffices to rule out strings for each
        // child the abstract state can't already prove is an object.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), 
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal, 
                    MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()), 
                    m_jit.vm()->stringStructure.get()));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal, 
                    MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()), 
                    m_jit.vm()->stringStructure.get()));
        }
    } else {
        // Watchpoint invalidated: in addition to ruling out strings, each
        // operand must be checked for the MasqueradesAsUndefined type-info
        // flag. The structure register is reused for both operands.
        GPRTemporary structure(this);
        GPRTemporary temp(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal, 
                    structureGPR, 
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal, 
                    structureGPR, 
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero, 
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Objects compare equal iff they are the same cell pointer.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1210
1211 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1212 {
1213     BasicBlock* taken = branchNode->branchData()->taken.block;
1214     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1215
1216     // The branch instruction will branch to the taken block.
1217     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1218     if (taken == nextBlock()) {
1219         condition = JITCompiler::invert(condition);
1220         BasicBlock* tmp = taken;
1221         taken = notTaken;
1222         notTaken = tmp;
1223     }
1224
1225     if (node->child1()->isBooleanConstant()) {
1226         bool imm = node->child1()->asBoolean();
1227         SpeculateBooleanOperand op2(this, node->child2());
1228         branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1229     } else if (node->child2()->isBooleanConstant()) {
1230         SpeculateBooleanOperand op1(this, node->child1());
1231         bool imm = node->child2()->asBoolean();
1232         branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1233     } else {
1234         SpeculateBooleanOperand op1(this, node->child1());
1235         SpeculateBooleanOperand op2(this, node->child2());
1236         branch32(condition, op1.gpr(), op2.gpr(), taken);
1237     }
1238
1239     jump(notTaken);
1240 }
1241
1242 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1243 {
1244     BasicBlock* taken = branchNode->branchData()->taken.block;
1245     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1246
1247     // The branch instruction will branch to the taken block.
1248     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1249     if (taken == nextBlock()) {
1250         condition = JITCompiler::invert(condition);
1251         BasicBlock* tmp = taken;
1252         taken = notTaken;
1253         notTaken = tmp;
1254     }
1255
1256     if (node->child1()->isInt32Constant()) {
1257         int32_t imm = node->child1()->asInt32();
1258         SpeculateInt32Operand op2(this, node->child2());
1259         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1260     } else if (node->child2()->isInt32Constant()) {
1261         SpeculateInt32Operand op1(this, node->child1());
1262         int32_t imm = node->child2()->asInt32();
1263         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1264     } else {
1265         SpeculateInt32Operand op1(this, node->child1());
1266         SpeculateInt32Operand op2(this, node->child2());
1267         branch32(condition, op1.gpr(), op2.gpr(), taken);
1268     }
1269
1270     jump(notTaken);
1271 }
1272
// Returns true if the compare is fused with a subsequent branch.
// Dispatches on the compare's use kinds to the appropriate fused
// compile helper; on fusion, consumes the branch node by advancing
// m_indexInBlock/m_currentNode past it.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare. 
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Unhandled use-kind pair: fall back to the generic fused branch,
                // which uses the children itself.
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The speculative helpers above do not use the children; do it here,
        // then skip past the branch node we just consumed.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1323
1324 void SpeculativeJIT::noticeOSRBirth(Node* node)
1325 {
1326     if (!node->hasVirtualRegister())
1327         return;
1328     
1329     VirtualRegister virtualRegister = node->virtualRegister();
1330     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1331     
1332     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1333 }
1334
1335 void SpeculativeJIT::compileMovHint(Node* node)
1336 {
1337     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1338     
1339     Node* child = node->child1().node();
1340     noticeOSRBirth(child);
1341     
1342     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1343 }
1344
// Abandons meaningful code generation for the rest of the current block.
void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    // Resetting m_compileOkay to true keeps subsequent per-node checks from
    // bailing again; the abort emitted below means code generated after this
    // point can never actually run.
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
1353
// Generates code for m_block: records the block's head variable formats in
// the variable event stream (for OSR exit), then compiles each node in order
// while keeping the abstract interpreter state in sync.
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);
    
    if (!m_block)
        return;
    
    ASSERT(m_block->isReachable);
    
    // Record where this block's code begins, for branch linking.
    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());
    
    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);
    
    // Replay the flush format of each live variable at the block head into
    // the event stream, so OSR exit knows how to reconstruct them.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.
        
        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }
    
    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();
    
    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);
        
        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        if (ASSERT_DISABLED)
            m_canExit = true; // Essentially disable the assertions.
        else
            m_canExit = mayExit(m_jit.graph(), m_currentNode);
        
        bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();
        if (!m_currentNode->shouldGenerate()) {
            // Dead nodes emit no code, but some still need bookkeeping for
            // the minified graph or the variable event stream.
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
                
            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;
                
            case MovHint:
                compileMovHint(m_currentNode);
                break;
                
            case ZombieHint: {
                recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {
            
            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
                dataLog("\n");
            }
            
            compile(m_currentNode);

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            m_jit.clearRegisterAllocationOffsets();
#endif

            // compile() may have discovered a reason to give up on this node.
            if (!m_compileOkay) {
                bail(DFGBailedAtEndOfNode);
                return;
            }
            
            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }
        }
        
        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_interpreter.executeEffects(m_indexInBlock);
    }
    
    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}
1487
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
// Each failing check OSR-exits to bytecode index 0.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }
        
        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();
        
        // FlushedJSValue accepts any value; nothing to check.
        if (format == FlushedJSValue)
            continue;
        
        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
        
#if USE(JSVALUE64)
        // 64-bit boxing: type checks are performed on the full 64-bit word.
        switch (format) {
        case FlushedInt32: {
            // Boxed int32s compare unsigned-at-or-above tagTypeNumber.
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            // XOR with ValueFalse: a boolean leaves only the low bit set.
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            // Cells have none of the tag-mask bits set.
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // 32-bit boxing: the tag word identifies the type directly.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1560
1561 bool SpeculativeJIT::compile()
1562 {
1563     checkArgumentTypes();
1564     
1565     ASSERT(!m_currentNode);
1566     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1567         m_jit.setForBlockIndex(blockIndex);
1568         m_block = m_jit.graph().block(blockIndex);
1569         compileCurrentBlock();
1570     }
1571     linkBranches();
1572     return true;
1573 }
1574
1575 void SpeculativeJIT::createOSREntries()
1576 {
1577     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1578         BasicBlock* block = m_jit.graph().block(blockIndex);
1579         if (!block)
1580             continue;
1581         if (!block->isOSRTarget)
1582             continue;
1583         
1584         // Currently we don't have OSR entry trampolines. We could add them
1585         // here if need be.
1586         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1587     }
1588 }
1589
1590 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1591 {
1592     unsigned osrEntryIndex = 0;
1593     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1594         BasicBlock* block = m_jit.graph().block(blockIndex);
1595         if (!block)
1596             continue;
1597         if (!block->isOSRTarget)
1598             continue;
1599         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1600     }
1601     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1602 }
1603
1604 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1605 {
1606     Edge child3 = m_jit.graph().varArgChild(node, 2);
1607     Edge child4 = m_jit.graph().varArgChild(node, 3);
1608
1609     ArrayMode arrayMode = node->arrayMode();
1610     
1611     GPRReg baseReg = base.gpr();
1612     GPRReg propertyReg = property.gpr();
1613     
1614     SpeculateDoubleOperand value(this, child3);
1615
1616     FPRReg valueReg = value.fpr();
1617     
1618     DFG_TYPE_CHECK(
1619         JSValueRegs(), child3, SpecFullRealNumber,
1620         m_jit.branchDouble(
1621             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1622     
1623     if (!m_compileOkay)
1624         return;
1625     
1626     StorageOperand storage(this, child4);
1627     GPRReg storageReg = storage.gpr();
1628
1629     if (node->op() == PutByValAlias) {
1630         // Store the value to the array.
1631         GPRReg propertyReg = property.gpr();
1632         FPRReg valueReg = value.fpr();
1633         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1634         
1635         noResult(m_currentNode);
1636         return;
1637     }
1638     
1639     GPRTemporary temporary;
1640     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1641
1642     MacroAssembler::Jump slowCase;
1643     
1644     if (arrayMode.isInBounds()) {
1645         speculationCheck(
1646             OutOfBounds, JSValueRegs(), 0,
1647             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1648     } else {
1649         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1650         
1651         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1652         
1653         if (!arrayMode.isOutOfBounds())
1654             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1655         
1656         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1657         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1658         
1659         inBounds.link(&m_jit);
1660     }
1661     
1662     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1663
1664     base.use();
1665     property.use();
1666     value.use();
1667     storage.use();
1668     
1669     if (arrayMode.isOutOfBounds()) {
1670         addSlowPathGenerator(
1671             slowPathCall(
1672                 slowCase, this,
1673                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1674                 NoResult, baseReg, propertyReg, valueReg));
1675     }
1676
1677     noResult(m_currentNode, UseChildrenCalledExplicitly);
1678 }
1679
// Emit GetCharCodeAt: bounds-check the int32 index against the string's
// length (speculation failure on out-of-bounds), then load the character
// code from the string's 8-bit or 16-bit backing store as an int32 result.
// child1 is the string cell, child2 the index, child3 the character storage.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();
    
    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // Fetch the string's impl pointer so we can inspect its flags below.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    // 8-bit path: one byte per character.
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    // 16-bit path: two bytes per character.
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}
1714
// Emit GetByVal on a string: load the character at the index and turn it
// into a single-character JSString. Characters < 0x100 come from the VM's
// pre-allocated single-character string table; larger characters call
// operationSingleCharacterString on a slow path. Out-of-bounds indices
// either fail speculation (in-bounds array mode) or take a slow path that
// produces a full JSValue result.
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // On 32-bit, the out-of-bounds slow path can produce an arbitrary
    // JSValue, so a tag register is allocated only when that path exists.
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    // Fetch the string's impl pointer so we can inspect its flags below.
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters >= 0x100 are not in the single-character table.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the single-character string table: shift by the pointer
    // size (4 or 8 bytes per entry), add the table base, and load the entry.
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        // If the String prototype chain has no indexed accessors, an
        // out-of-bounds read is known to yield undefined; use the cheaper
        // "sane chain" slow path in that case.
        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
#if USE(JSVALUE64)
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
#else
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg)));
#endif
        } else {
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }
        
#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
1808
// Emit String.fromCharCode for a single int32 char code. Codes in [0, 0xff)
// are served from the VM's single-character string table; a code outside
// that range, or a table slot that has not been materialized yet (null
// entry), falls back to operationStringFromCharCode.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    // Unsigned compare also routes negative codes to the slow path.
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // A null table entry means the cached string doesn't exist yet.
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
1827
1828 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1829 {
1830     VirtualRegister virtualRegister = node->virtualRegister();
1831     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1832
1833     switch (info.registerFormat()) {
1834     case DataFormatStorage:
1835         RELEASE_ASSERT_NOT_REACHED();
1836
1837     case DataFormatBoolean:
1838     case DataFormatCell:
1839         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1840         return GeneratedOperandTypeUnknown;
1841
1842     case DataFormatNone:
1843     case DataFormatJSCell:
1844     case DataFormatJS:
1845     case DataFormatJSBoolean:
1846     case DataFormatJSDouble:
1847         return GeneratedOperandJSValue;
1848
1849     case DataFormatJSInt32:
1850     case DataFormatInt32:
1851         return GeneratedOperandInteger;
1852
1853     default:
1854         RELEASE_ASSERT_NOT_REACHED();
1855         return GeneratedOperandTypeUnknown;
1856     }
1857 }
1858
// Emit ValueToInt32 for the supported use kinds:
// - Int52RepUse (64-bit only): truncate by zero-extending the low 32 bits.
// - DoubleRepUse: try an inline truncating conversion; fall back to the
//   toInt32 runtime call when truncation fails.
// - NumberUse / NotCellUse: dispatch on how the operand is currently
//   materialized. Already-int32 operands are just moved; boxed JSValues are
//   type-checked, unboxed, and converted (inline for int32, via the toInt32
//   call for doubles; NotCellUse additionally maps booleans to 0/1).
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        // ToInt32 of an int52 is just truncation to the low 32 bits.
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        // Inline truncation; doubles outside int32 range take the slow path.
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
        
        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
        
        int32Result(gpr, node);
        return;
    }
    
    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            // Operand is already an int32: just copy it.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // In the 64-bit value encoding, boxed int32s compare AboveOrEqual
            // to the tag-type-number register.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
                
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));
                
                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());
                
                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // The runtime call clobbers registers; spill around it, keeping
            // resultGpr live for the return value.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();
        
            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
                    
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        branchIsCell(op1.jsValueRegs()));
                    
                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());
                    
                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());
                    
                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                // The runtime call clobbers registers; spill around it,
                // keeping resultGpr live for the return value.
                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
    
    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2019
// Convert a value that is logically a uint32 (held in an int32 register) to
// a JS number.
// - Overflow mode: always produce a double. An int32 register with the sign
//   bit set actually denotes a value in [2^31, 2^32), so 2^32 is added to
//   the converted double to recover the unsigned value.
// - CheckOverflow mode: keep an int32 result and speculate that the value
//   is non-negative, OSR-exiting when the sign bit is set.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.
            
        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);
            
        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();
            
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
            
        // "Negative" int32 bit patterns are really large unsigned values;
        // compensate by adding 2^32.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);
            
        doubleResult(outputFPR, node);
        return;
    }
    
    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    // Speculate that the uint32 fits in an int32 (high bit clear).
    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    int32Result(result.gpr(), node, op1.format());
}
2053
// Convert a double to an int32, speculating that the conversion is exact.
// branchConvertDoubleToInt32 fills failureCases when the double does not
// round-trip as an int32 (and, when the arith mode requires it, when the
// input is negative zero); any failure triggers an Overflow OSR exit.
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);
    
    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), 0, failureCases);

    int32Result(resultGPR, node);
}
2073
// Produce a DoubleRep (machine double in an FPR) for the child value.
// - NumberUse: if abstract interpretation proved the value is an int32,
//   simply convert it. Otherwise take the boxed JSValue, type-check that it
//   is a number, and either unbox the double or convert the boxed int32.
// - Int52RepUse (64-bit only): convert the strict int52 to a double.
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case NumberUse: {
        ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
    
        if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }
    
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);
    
#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
    
        // In the 64-bit value encoding, boxed int32s compare AboveOrEqual
        // to the tag-type-number register.
        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
    
        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }
    
        // Copy to a temp before unboxing so the operand register is preserved.
        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        JITCompiler::Jump done = m_jit.jump();
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);
    
        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
    
        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
    
        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }
    
        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        JITCompiler::Jump done = m_jit.jump();
    
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)
    
        doubleResult(resultFPR, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);
        
        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2163
// Produce a boxed JSValue from a DoubleRep or Int52Rep child: purify any
// impure NaN bits (doubles) or re-box the int52 (64-bit only).
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);
        
        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();
        
        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);
        
        jsValueResult(resultRegs, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);
        
        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();
        
        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
        
        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2207
// Bias the input by 0.5 and clamp to [0, 255]. The caller truncates the
// returned double, so overall this implements round-half-up byte clamping.
// The low-side test is written as !(x > 0) so NaN also clamps to 0.
static double clampDoubleToByte(double value)
{
    const double biased = value + 0.5;
    if (!(biased > 0))
        return 0;   // Non-positive or NaN: clamp to the low end.
    if (biased > 255)
        return 255; // Too large: clamp to the high end.
    return biased;
}
2217
2218 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2219 {
2220     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2221     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2222     jit.xorPtr(result, result);
2223     MacroAssembler::Jump clamped = jit.jump();
2224     tooBig.link(&jit);
2225     jit.move(JITCompiler::TrustedImm32(255), result);
2226     clamped.link(&jit);
2227     inBounds.link(&jit);
2228 }
2229
2230 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2231 {
2232     // Unordered compare so we pick up NaN
2233     static const double zero = 0;
2234     static const double byteMax = 255;
2235     static const double half = 0.5;
2236     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2237     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2238     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2239     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2240     
2241     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2242     // FIXME: This should probably just use a floating point round!
2243     // https://bugs.webkit.org/show_bug.cgi?id=72054
2244     jit.addDouble(source, scratch);
2245     jit.truncateDoubleToInt32(scratch, result);   
2246     MacroAssembler::Jump truncatedInt = jit.jump();
2247     
2248     tooSmall.link(&jit);
2249     jit.xorPtr(result, result);
2250     MacroAssembler::Jump zeroed = jit.jump();
2251     
2252     tooBig.link(&jit);
2253     jit.move(JITCompiler::TrustedImm32(255), result);
2254     
2255     truncatedInt.link(&jit);
2256     zeroed.link(&jit);
2257
2258 }
2259
2260 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2261 {
2262     if (node->op() == PutByValAlias)
2263         return JITCompiler::Jump();
2264     if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2265         uint32_t length = view->length();
2266         Node* indexNode = m_jit.graph().child(node, 1).node();
2267         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2268             return JITCompiler::Jump();
2269         return m_jit.branch32(
2270             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2271     }
2272     return m_jit.branch32(
2273         MacroAssembler::AboveOrEqual, indexGPR,
2274         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2275 }
2276
2277 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2278 {
2279     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2280     if (!jump.isSet())
2281         return;
2282     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2283 }
2284
// Emit GetByVal on an integer typed array. After the bounds check, the
// element is loaded with the width/signedness implied by |type|, then the
// result representation is chosen:
// - element size < 4, or signed: always fits an int32.
// - uint32: int32 if speculated non-negative (Overflow exit otherwise);
//   else int52 (64-bit only) when machine-int speculation holds; else a
//   double, adding 2^32 when the loaded value has the sign bit set.
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));
    
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    // Load with the element width and signedness of the array type.
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }
    
    // Remaining case: uint32, which may not fit in an int32.
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        // Speculate the value has the high bit clear.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }
    
#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        // Any uint32 fits losslessly in an int52.
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif
    
    // Fall back to a double; add 2^32 when the sign bit was set, since the
    // int32 register really holds a value in [2^31, 2^32).
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2349
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    // Store into an integer-typed array. The value edge may be a numeric
    // constant, an Int32, an Int52 (64-bit only), or a double; each path
    // materializes an int32 in a fresh temporary (valueGPR), clamping to
    // [0, 255] first when the array type is clamped (Uint8Clamped).
    ASSERT(isInt(type));
    
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    // Adopted below once we know which path produced the value.
    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;
    
    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            // A non-numeric constant can never satisfy this node's speculation.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            // Clamp at compile time since the value is a known constant.
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
            
#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                // Clamp to [0, 255]. The unsigned BelowOrEqual test passes for
                // values in [0, 0xff]; of the rest, the signed GreaterThan test
                // separates values > 0xff (clamped to 255) from negative values
                // (clamped to 0).
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)
            
        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN truncates to 0 (fpr != fpr only for NaN).
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);
                
                // Fast hardware truncation; fall back to the toInt32() slow
                // path when the hardware cannot represent the result.
                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                
                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
                
                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    // valueGPR is a fresh temporary, so it cannot alias the incoming registers.
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds array mode: going out of bounds is a speculation failure.
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    // Otherwise, an out-of-bounds store simply skips the write.
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2485
2486 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2487 {
2488     ASSERT(isFloat(type));
2489     
2490     SpeculateCellOperand base(this, node->child1());
2491     SpeculateStrictInt32Operand property(this, node->child2());
2492     StorageOperand storage(this, node->child3());
2493
2494     GPRReg baseReg = base.gpr();
2495     GPRReg propertyReg = property.gpr();
2496     GPRReg storageReg = storage.gpr();
2497
2498     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2499
2500     FPRTemporary result(this);
2501     FPRReg resultReg = result.fpr();
2502     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2503     switch (elementSize(type)) {
2504     case 4:
2505         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2506         m_jit.convertFloatToDouble(resultReg, resultReg);
2507         break;
2508     case 8: {
2509         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2510         break;
2511     }
2512     default:
2513         RELEASE_ASSERT_NOT_REACHED();
2514     }
2515     
2516     doubleResult(resultReg, node);
2517 }
2518
2519 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2520 {
2521     ASSERT(isFloat(type));
2522     
2523     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2524     GPRReg storageReg = storage.gpr();
2525     
2526     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2527     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2528
2529     SpeculateDoubleOperand valueOp(this, valueUse);
2530     FPRTemporary scratch(this);
2531     FPRReg valueFPR = valueOp.fpr();
2532     FPRReg scratchFPR = scratch.fpr();
2533
2534     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2535     
2536     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2537     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2538         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2539         outOfBounds = MacroAssembler::Jump();
2540     }
2541     
2542     switch (elementSize(type)) {
2543     case 4: {
2544         m_jit.moveDouble(valueFPR, scratchFPR);
2545         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2546         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2547         break;
2548     }
2549     case 8:
2550         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2551         break;
2552     default:
2553         RELEASE_ASSERT_NOT_REACHED();
2554     }
2555     if (outOfBounds.isSet())
2556         outOfBounds.link(&m_jit);
2557     noResult(node);
2558 }
2559
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Emits the prototype-chain walk for instanceof. On exit, scratchReg holds
    // the result: an encoded JSValue boolean on 64-bit, a 0/1 payload on
    // 32-bit. scratch2Reg is clobbered by the structure load.
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));
    
    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);
    
    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    // Keep walking while the loaded prototype is still a cell (64-bit) /
    // non-null (32-bit); otherwise we have reached the end of the chain.
#if USE(JSVALUE64)
    branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
    
    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();
    
    // Found prototypeReg on the chain - result is true.
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
    
    putResult.link(&m_jit);
}
2596
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    // Compiles instanceof. Two paths: an untyped path that tolerates non-cell
    // values (yielding false for them), and a cell-speculated fast path. Both
    // delegate the actual prototype-chain walk to compileInstanceOfForObject().
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.
        
        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);
        
        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();
        
        // Non-cells are never an instance of anything: produce false directly.
        MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();
        
        isCell.link(&m_jit);
        
        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
        
        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }
    
    // Cell-speculated path: both operands are known cells.
    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    
    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();
    
    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
2644
void SpeculativeJIT::compileAdd(Node* node)
{
    // Compiles an add for Int32, Int52 (64-bit only), and double use kinds.
    // Int32/Int52 paths emit an overflow speculation check unless arithMode
    // says overflow is unchecked.
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        // Constant-on-the-left fast path: fold the immediate into the add.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
        
        // Constant-on-the-right fast path.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);
                
            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }
                
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
                
            // If the result reused one of the operand registers, the add was
            // performed in place; attach a SpeculationRecovery so OSR exit can
            // subtract the other operand back out to recover the original
            // operand value.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)
    
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
2759
void SpeculativeJIT::compileMakeRope(Node* node)
{
    // Inline-allocates a JSRopeString from two or three string fibers,
    // computing the rope's flags and length inline. Falls back to
    // operationMakeRope2/3 when the cell allocation takes the slow path.
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
    
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
        
    // Null value pointer marks the string as a rope; store the fibers and
    // zero out the unused fiber slots.
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Seed the accumulators from the first fiber: scratchGPR collects the
    // and-ed flags, and allocatorGPR (no longer needed for allocation) is
    // reused to accumulate the total length.
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        // Paranoid runtime check that the first fiber's length is non-negative.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        // And the flags (the rope is 8-bit only if every fiber is) and add the
        // lengths; a length overflow is a speculation failure.
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    // Keep only the Is8Bit bit of the accumulated flags for the new rope.
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        // Paranoid runtime check that the accumulated length is non-negative.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
        
    cellResult(resultGPR, node);
}
2840
2841 void SpeculativeJIT::compileArithSub(Node* node)
2842 {
2843     switch (node->binaryUseKind()) {
2844     case Int32Use: {
2845         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2846         
2847         if (node->child2()->isNumberConstant()) {
2848             SpeculateInt32Operand op1(this, node->child1());
2849             int32_t imm2 = node->child2()->asInt32();
2850             GPRTemporary result(this);
2851
2852             if (!shouldCheckOverflow(node->arithMode())) {
2853                 m_jit.move(op1.gpr(), result.gpr());
2854                 m_jit.sub32(Imm32(imm2), result.gpr());
2855             } else {
2856                 GPRTemporary scratch(this);
2857                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
2858             }
2859
2860             int32Result(result.gpr(), node);
2861             return;
2862         }
2863             
2864         if (node->child1()->isNumberConstant()) {
2865             int32_t imm1 = node->child1()->asInt32();
2866             SpeculateInt32Operand op2(this, node->child2());
2867             GPRTemporary result(this);
2868                 
2869             m_jit.move(Imm32(imm1), result.gpr());
2870             if (!shouldCheckOverflow(node->arithMode()))
2871                 m_jit.sub32(op2.gpr(), result.gpr());
2872             else
2873                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2874                 
2875             int32Result(result.gpr(), node);
2876             return;
2877         }
2878             
2879         SpeculateInt32Operand op1(this, node->child1());
2880         SpeculateInt32Operand op2(this, node->child2());
2881         GPRTemporary result(this);
2882
2883         if (!shouldCheckOverflow(node->arithMode())) {
2884             m_jit.move(op1.gpr(), result.gpr());
2885             m_jit.sub32(op2.gpr(), result.gpr());
2886         } else
2887             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
2888
2889         int32Result(result.gpr(), node);
2890         return;
2891     }
2892         
2893 #if USE(JSVALUE64)
2894     case Int52RepUse: {
2895         ASSERT(shouldCheckOverflow(node->arithMode()));
2896         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
2897
2898         // Will we need an overflow check? If we can prove that neither input can be
2899         // Int52 then the overflow check will not be necessary.
2900         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
2901             && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
2902             SpeculateWhicheverInt52Operand op1(this, node->child1());
2903             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
2904             GPRTemporary result(this, Reuse, op1);
2905             m_jit.move(op1.gpr(), result.gpr());
2906             m_jit.sub64(op2.gpr(), result.gpr());
2907             int52Result(result.gpr(), node, op1.format());
2908             return;
2909         }
2910         
2911         SpeculateInt52Operand op1(this, node->child1());
2912         SpeculateInt52Operand op2(this, node->child2());
2913         GPRTemporary result(this);
2914         m_jit.move(op1.gpr(), result.gpr());
2915         speculationCheck(
2916             Int52Overflow, JSValueRegs(), 0,
2917             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
2918         int52Result(result.gpr(), node);
2919         return;
2920     }
2921 #endif // USE(JSVALUE64)
2922
2923     case DoubleRepUse: {
2924         SpeculateDoubleOperand op1(this, node->child1());
2925         SpeculateDoubleOperand op2(this, node->child2());
2926         FPRTemporary result(this, op1);
2927
2928         FPRReg reg1 = op1.fpr();
2929         FPRReg reg2 = op2.fpr();
2930         m_jit.subDouble(reg1, reg2, result.fpr());
2931
2932         doubleResult(result.fpr(), node);
2933         return;
2934     }
2935         
2936     default:
2937         RELEASE_ASSERT_NOT_REACHED();
2938         return;
2939     }
2940 }
2941
void SpeculativeJIT::compileArithNegate(Node* node)
{
    // Compiles a negate for Int32, Int52 (64-bit only), and double use kinds.
    // Integer paths may speculate on overflow and/or a negative-zero result
    // depending on arithMode.
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.
        
        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // (value & 0x7fffffff) == 0 iff value is 0 (would negate to -0) or
            // INT32_MIN (would overflow), so one test covers both checks.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // If the input cannot actually be a full Int52, negation cannot
        // overflow, so skip the overflow branch.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                // A zero result means the input was zero: -0 is not an int.
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);
        
        m_jit.negateDouble(op1.fpr(), result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithMul(Node* node)
{
    // Compiles a multiply for Int32, Int52 (64-bit only), and double use
    // kinds, with overflow and negative-zero speculation per arithMode.
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }
            
        // Check for negative zero, if the users of this node care about such things.
        // A zero result with either operand negative means the true result is -0.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int32Result(result.gpr(), node);
        return;
    }
    
#if USE(JSVALUE64)   
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 12
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.

        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();
        
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
        
        // Check for negative zero: a zero result with either operand negative
        // means the true result is -0.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }
        
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        
        m_jit.mulDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3132
// Emits speculative code for ArithDiv. For Int32Use, the division is done with
// the target's integer divide instruction (x86 idiv / ARM sdiv) plus checks
// that the result really is an int32: no remainder, no overflow, and no
// negative zero when the arith mode demands it; any failed check OSR-exits.
// For DoubleRepUse it is a plain double divide.
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        // x86 idiv implicitly takes the dividend in edx:eax and leaves the
        // quotient in eax and the remainder in edx, so pin both registers.
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
    
        // Pick a scratch register ("temp") distinct from both operands. If op2
        // aliases eax/edx it must be copied out of idiv's way later, so allocate
        // a fresh register for it; otherwise reuse whichever of eax/edx op1
        // does not occupy.
        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }
    
        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
    
        // temp = op2 + 1. An unsigned compare "Above 1" then filters out
        // op2 == 0 and op2 == -1 in one branch — the only denominators for
        // which idiv can fault (divide by zero) or overflow (-2^31 / -1).
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
    
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
    
        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.
        
            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());
        
            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
            done.append(m_jit.jump());
        
            notNeg2ToThe31.link(&m_jit);
        }
    
        safeDenominator.link(&m_jit);
    
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero. (0 / negative == -0.)
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
    
        // Move op2 out of eax/edx if it lives there, since idiv clobbers both.
        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }
            
        // cdq sign-extends eax into edx:eax, then idiv divides edx:eax by op2.
        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);
            
        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
        
        done.link(&m_jit);
        int32Result(eax.gpr(), node);
#elif CPU(APPLE_ARMV7S) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead. ARM sdiv does not trap and yields no
        // remainder register, so verify by multiplying the quotient back and
        // comparing against the original numerator (the multiply-overflow check
        // also catches the truncated -2^31 / -1 case).
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }

        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        break;
    }
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.divDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
3269
3270 void SpeculativeJIT::compileArithMod(Node* node)
3271 {
3272     switch (node->binaryUseKind()) {
3273     case Int32Use: {
3274         // In the fast path, the dividend value could be the final result
3275         // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3276         SpeculateStrictInt32Operand op1(this, node->child1());
3277         
3278         if (node->child2()->isInt32Constant()) {
3279             int32_t divisor = node->child2()->asInt32();
3280             if (divisor > 1 && hasOneBitSet(divisor)) {
3281                 unsigned logarithm = WTF::fastLog2(divisor);
3282                 GPRReg dividendGPR = op1.gpr();
3283                 GPRTemporary result(this);
3284                 GPRReg resultGPR = result.gpr();
3285
3286                 // This is what LLVM generates. It's pretty crazy. Here's my
3287                 // attempt at understanding it.
3288                 
3289                 // First, compute either divisor - 1, or 0, depending on whether
3290                 // the dividend is negative:
3291                 //
3292                 // If dividend < 0:  resultGPR = divisor - 1
3293                 // If dividend >= 0: resultGPR = 0
3294                 m_jit.move(dividendGPR, resultGPR);
3295                 m_jit.rshift32(TrustedImm32(31), resultGPR);
3296                 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3297                 
3298                 // Add in the dividend, so that:
3299                 //
3300                 // If dividend < 0:  resultGPR = dividend + divisor - 1
3301                 // If dividend >= 0: resultGPR = dividend
3302                 m_jit.add32(dividendGPR, resultGPR);
3303                 
3304                 // Mask so as to only get the *high* bits. This rounds down
3305                 // (towards negative infinity) resultGPR to the nearest multiple
3306                 // of divisor, so that:
3307                 //
3308                 // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
3309                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3310                 //
3311                 // Note that this can be simplified to:
3312                 //
3313                 // If dividend < 0:  resultGPR = ceil(dividend / divisor)
3314                 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3315                 //
3316                 // Note that if the dividend is negative, resultGPR will also be negative.
3317                 // Regardless of the sign of dividend, resultGPR will be rounded towards
3318                 // zero, because of how things are conditionalized.
3319                 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3320                 
3321                 // Subtract resultGPR from dividendGPR, which yields the remainder:
3322                 //
3323                 // resultGPR = dividendGPR - resultGPR
3324                 m_jit.neg32(resultGPR);
3325                 m_jit.add32(dividendGPR, resultGPR);
3326                 
3327                 if (shouldCheckNegativeZero(node->arithMode())) {
3328                     // Check that we're not about to create negative zero.
3329                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3330                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3331                     numeratorPositive.link(&m_jit);
3332                 }
3333
3334                 int32Result(resultGPR, node);
3335                 return;
3336             }
3337         }
3338         
3339 #if CPU(X86) || CPU(X86_64)
3340         if (node->child2()->isInt32Constant()) {
3341             int32_t divisor = node->child2()->asInt32();
3342             if (divisor && divisor != -1) {
3343                 GPRReg op1Gpr = op1.gpr();
3344
3345                 GPRTemporary eax(this, X86Registers::eax);
3346                 GPRTemporary edx(this, X86Registers::edx);
3347                 GPRTemporary scratch(this);
3348                 GPRReg scratchGPR = scratch.gpr();
3349
3350                 GPRReg op1SaveGPR;
3351                 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3352                     op1SaveGPR = allocate();
3353                     ASSERT(op1Gpr != op1SaveGPR);
3354                     m_jit.move(op1Gpr, op1SaveGPR);
3355                 } else
3356                     op1SaveGPR = op1Gpr;
3357                 ASSERT(op1SaveGPR != X86Registers::eax);
3358                 ASSERT(op1SaveGPR != X86Registers::edx);
3359
3360                 m_jit.move(op1Gpr, eax.gpr());
3361                 m_jit.move(TrustedImm32(divisor), scratchGPR);
3362                 m_jit.assembler().cdq();
3363                 m_jit.assembler().idivl_r(scratchGPR);
3364                 if (shouldCheckNegativeZero(node->arithMode())) {
3365                     JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3366                     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3367                     numeratorPositive.link(&m_jit);
3368                 }
3369             
3370                 if (op1SaveGPR != op1Gpr)
3371                     unlock(op1SaveGPR);
3372
3373                 int32Result(edx.gpr(), node);
3374                 return;
3375             }
3376         }
3377 #endif
3378
3379         SpeculateInt32Operand op2(this, node->child2());
3380 #if CPU(X86) || CPU(X86_64)
3381         GPRTemporary eax(this, X86Registers::eax);
3382         GPRTemporary edx(this, X86Registers::edx);
3383         GPRReg op1GPR = op1.gpr();
3384         GPRReg op2GPR = op2.gpr();
3385     
3386         GPRReg op2TempGPR;
3387         GPRReg temp;
3388         GPRReg op1SaveGPR;
3389     
3390         if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3391             op2TempGPR = allocate();
3392             temp = op2TempGPR;
3393         } else {
3394             op2TempGPR = InvalidGPRReg;
3395             if (op1GPR == X86Registers::eax)
3396                 temp = X86Registers::edx;
3397             else
3398                 temp = X86Registers::eax;
3399         }
3400     
3401         if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3402             op1SaveGPR = allocate();
3403             ASSERT(op1GPR != op1SaveGPR);
3404             m_jit.move(op1GPR, op1SaveGPR);
3405         } else
3406             op1SaveGPR = op1GPR;
3407     
3408         ASSERT(temp != op1GPR);
3409         ASSERT(temp != op2GPR);
3410         ASSERT(op1SaveGPR != X86Registers::eax);
3411         ASSERT(op1SaveGPR != X86Registers::edx);
3412     
3413         m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3414     
3415         JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3416     
3417         JITCompiler::JumpList done;
3418         
3419         // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3420         // separate case for that. But it probably doesn't matter so much.
3421         if (shouldCheckOverflow(node->arithMode())) {
3422             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3423             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3424         } else {
3425             // This is the case where we convert the result to an int after we're done, and we
3426             // already know that the denominator is either -1 or 0. So, if the denominator is
3427             // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3428             // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3429             // happy to fall through to a normal division, since we're just dividing something
3430             // by negative 1.
3431         
3432             JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3433             m_jit.move(TrustedImm32(0), edx.gpr());
3434             done.append(m_jit.jump());
3435         
3436             notZero.link(&m_jit);
3437             JITCompiler::Jump notNeg2ToThe31 =
3438                 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3439             m_jit.move(TrustedImm32(0), edx.gpr());
3440             done.append(m_jit.jump());
3441         
3442             notNeg2ToThe31.link(&m_jit);
3443         }
3444         
3445         safeDenominator.link(&m_jit);
3446             
3447         if (op2TempGPR != InvalidGPRReg) {
3448             m_jit.move(op2GPR, op2TempGPR);
3449             op2GPR = op2TempGPR;
3450         }
3451             
3452         m_jit.move(op1GPR, eax.gpr());
3453         m_jit.assembler().cdq();
3454         m_jit.assembler().idivl_r(op2GPR);
3455             
3456         if (op2TempGPR != InvalidGPRReg)
3457             unlock(op2TempGPR);
3458
3459         // Check that we're not about to create negative zero.
3460         if (shouldCheckNegativeZero(node->arithMode())) {
3461             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3462             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3463             numeratorPositive.link(&m_jit);
3464         }
3465     
3466         if (op1SaveGPR != op1GPR)
3467             unlock(op1SaveGPR);
3468             
3469         done.link(&m_jit);
3470         int32Result(edx.gpr(), node);
3471
3472 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3473         GPRTemporary temp(this);
3474         GPRTemporary quotientThenRemainder(this);
3475         GPRTemporary multiplyAnswer(this);
3476         GPRReg dividendGPR = op1.gpr();
3477         GPRReg divisorGPR = op2.gpr();
3478         GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3479         GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3480
3481         JITCompiler::JumpList done;
3482     
3483         if (shouldCheckOverflow(node->arithMode()))
3484             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3485         else {
3486             JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3487             m_jit.move(divisorGPR, quotientThenRemainderGPR);
3488             done.append(m_jit.jump());
3489             denominatorNotZero.link(&m_jit);
3490         }
3491
3492         m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3493         // FIXME: It seems like there are cases where we don't need this? What if we have
3494         // arithMode() == Arith::Unchecked?
3495         // https://bugs.webkit.org/show_bug.cgi?id=126444
3496         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3497 #if CPU(APPLE_ARMV7S)
3498         m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3499 #else
3500         m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3501 #endif
3502
3503         // If the user cares about negative zero, then speculate that we're not about
3504         // to produce negative zero.
3505         if (shouldCheckNegativeZero(node->arithMode())) {
3506             // Check that we're not about to create negative zero.
3507             JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3508             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3509             numeratorPositive.link(&m_jit);
3510         }
3511
3512         done.link(&m_jit);
3513
3514         int32Result(quotientThenRemainderGPR, node);
3515 #else // not architecture that can do integer division
3516         RELEASE_ASSERT_NOT_REACHED();
3517 #endif
3518         return;
3519     }
3520         
3521     case DoubleRepUse: {
3522         SpeculateDoubleOperand op1(this, node->child1());
3523         SpeculateDoubleOperand op2(this, node->child2());
3524         
3525         FPRReg op1FPR = op1.fpr();
3526         FPRReg op2FPR = op2.fpr();
3527         
3528         flushRegisters();
3529         
3530         FPRResult result(this);
3531         
3532         callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3533         
3534         doubleResult(result.fpr(), node);
3535         return;
3536     }
3537         
3538     default:
3539         RELEASE_ASSERT_NOT_REACHED();
3540         return;
3541     }
3542 }
3543
// Returns true if the compare is fused with a subsequent branch.
// Dispatches a relational/equality compare node to the most specific compiler
// available. NOTE: the order of the isBinaryUseKind() checks below is the
// dispatch priority — do not reorder. Anything not matched falls through to
// the generic non-speculative compare at the bottom.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // First try to fuse the compare with the branch node that follows it; if
    // that succeeds, the branch is compiled too and the caller must not emit it.
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;

    if (node->isBinaryUseKind(Int32Use)) {
        compileInt32Compare(node, condition);
        return false;
    }
    
#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        compileInt52Compare(node, condition);
        return false;
    }
#endif // USE(JSVALUE64)
    
    if (node->isBinaryUseKind(DoubleRepUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }
    
    // The cases below only apply to equality; relational compares on these
    // use kinds go through the generic path.
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }
        
        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(StringIdentUse)) {
            compileStringIdentEquality(node);
            return false;
        }
        
        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }
        
        // Mixed object / object-or-other operands: the helper takes the
        // known-object child first, so swap the children for the second case.
        if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }
        
        if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }
    
    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
3602
// Compiles a StrictEqual (===) node. Returns true if the compare was fused
// with a subsequent branch node (the peephole case), in which case the branch
// has been compiled as well and m_indexInBlock/m_currentNode have been
// advanced past it; returns false if a boolean result was produced normally.
// NOTE: the order of the isBinaryUseKind() checks is the dispatch priority —
// do not reorder.
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    if (node->isBinaryUseKind(BooleanUse)) {
        // Peephole: if the next node in the block is a branch consuming this
        // compare, compile compare+branch as one fused sequence.
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            // Skip ahead so the main loop does not compile the branch again.
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    if (node->isBinaryUseKind(Int32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt32Compare(node, MacroAssembler::Equal);
        return false;
    }
    
#if USE(JSVALUE64)   
    if (node->isBinaryUseKind(Int52RepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt52Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }
    
    if (node->isBinaryUseKind(StringUse)) {
        compileStringEquality(node);
        return false;
    }
    
    if (node->isBinaryUseKind(StringIdentUse)) {
        compileStringIdentEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(MiscUse, UntypedUse)
        || node->isBinaryUseKind(UntypedUse, MiscUse)) {
        compileMiscStrictEq(node);
        return false;
    }
    
    // Mixed-use-kind cases below: the helpers take the specifically-typed
    // child first, so the children are swapped for the mirrored orderings.
    if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
        return false;
    }
    
    if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
        return false;
    }
    
    if (node->isBinaryUseKind(StringUse, UntypedUse)) {
        compileStringToUntypedEquality(node, node->child1(), node->child2());
        return false;
    }
    
    if (node->isBinaryUseKind(UntypedUse, StringUse)) {
        compileStringToUntypedEquality(node, node->child2(), node->child1());
        return false;
    }
    
    // Everything else must have been speculated as fully untyped.
    RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
    return nonSpeculativeStrictEq(node);
}
3721
3722 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3723 {
3724     SpeculateBooleanOperand op1(this, node->child1());
3725     SpeculateBooleanOperand op2(this, node->child2());
3726     GPRTemporary result(this);
3727     
3728     m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3729     
3730     unblessedBooleanResult(result.gpr(), node);
3731 }
3732
3733 void SpeculativeJIT::compileStringEquality(
3734     Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
3735     GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
3736     JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
3737 {
3738     JITCompiler::JumpList trueCase;
3739     JITCompiler::JumpList falseCase;
3740     JITCompiler::JumpList slowCase;
3741     
3742     trueCase.append(fastTrue);
3743     falseCase.append(fastFalse);
3744
3745     m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
3746     
3747     falseCase.append(m_jit.branch32(
3748         MacroAssembler::NotEqual,
3749         MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
3750         lengthGPR));
3751     
3752     trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
3753     
3754     m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
3755     m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
3756     
3757     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
3758     slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
3759     
3760     slowCase.append(m_jit.branchTest32(
3761         MacroAssembler::Zero,
3762         MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
3763         TrustedImm32(StringImpl::flagIs8Bit())));
3764     slowCase.append(m_jit.branchTest32(
3765         MacroAssembler::Zero,
3766         MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
3767         TrustedImm32(StringImpl::flagIs8Bit())));
3768     
3769     m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
3770     m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
3771     
3772     MacroAssembler::Label loop = m_jit.label();
3773     
3774     m_jit.sub32(TrustedImm32(1), lengthGPR);
3775
3776     // This isn't going to generate the best code on x86. But that's OK, it's still better
3777     // than not inlining.
3778     m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
3779     m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
3780     falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
3781     
3782     m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
3783     
3784     trueCase.link(&m_jit);
3785     moveTrueTo(leftTempGPR);
3786     
3787     JITCompiler::Jump done = m_jit.jump();
3788
3789     falseCase.link(&m_jit);
3790     moveFalseTo(leftTempGPR);
3791     
3792     done.link(&m_jit);
3793     addSlowPathGenerator(
3794