/*
 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "RegExpConstructor.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

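// Inline-allocates an object with the given structure and, when the structure has indexed
// properties, butterfly storage sized for vectorLength, falling back to operationNewRawObject
// when fast allocation fails. Unused slots of a double array are pre-filled with PNaN holes.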
void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
{
    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);

    unsigned inlineCapacity = structure->inlineCapacity();
    unsigned outOfLineCapacity = structure->outOfLineCapacity();

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    ASSERT(vectorLength >= numElements);
    vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);

    JITCompiler::JumpList slowCases;

    size_t size = 0;
    if (hasIndexingHeader)
        size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
    size += outOfLineCapacity * sizeof(JSValue);

    if (size) {
        slowCases.append(
            emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
        if (hasIndexingHeader)
            m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
        else
            m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
    } else
        m_jit.move(TrustedImmPtr(0), storageGPR);

    size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
    MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
    m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
    emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);

    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewRawObject, resultGPR, storageGPR,
        structure, vectorLength));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
}

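// Loads the argument count of the frame identified by inlineCallFrame (or the machine call
// frame when inlineCallFrame is null) into lengthGPR, optionally excluding |this|.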
void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

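// When OSR exit fuzzing is enabled, bumps the global fuzz-check counter and returns a jump
// that fires once the configured threshold is reached; otherwise returns an unset jump.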
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::unreachable(Node* node)
{
    m_compileOkay = false;
    m_jit.abortWithReason(DFGUnreachableNode, node->op());
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::stubUnavailableRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
{
    m_slowPathLambdas.append(std::make_pair(lambda, m_currentNode));
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (auto& slowPathGenerator : m_slowPathGenerators) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
        slowPathGenerator->generate(this);
    }
    for (auto& generatorPair : m_slowPathLambdas) {
        Node* currentNode = generatorPair.second;
        m_currentNode = currentNode;
        pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
        generatorPair.first();
    }
}

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

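// Builds a plan describing how to spill the value currently held in the GPR |source| for
// |spillMe|, and how to refill it afterwards, without perturbing register allocation state.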
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

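// Emits the store described by a silent save plan, writing the planned register back to its
// virtual register slot on the stack.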
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

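// Returns a jump that is taken when the indexing type byte held in tempGPR does not match the
// shape expected by arrayMode. Clobbers tempGPR.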
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

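// Converts the base object's storage to the indexing type demanded by the node's ArrayMode,
// deferring to an ArrayifySlowPathGenerator when the fast structure/indexing check fails.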
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

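// Fills the storage pointer for |edge| into a GPR, reloading it from its spill slot when
// necessary; anything not already in DataFormatStorage is filled as a cell instead.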
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

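// Compiles TryGetById through the get-by-id inline cache in GetPure mode; the UntypedUse path
// routes non-cell bases straight to the slow path via the notCell jump.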
void SpeculativeJIT::compileTryGetById(Node* node)
{
    switch (node->child1().useKind()) {
    case CellUse: {
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        base.use();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}

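// Compiles the 'in' operator. When the key is a constant atomic string this emits a patchable
// inline cache backed by operationInOptimize; otherwise it calls operationGenericIn.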
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteById(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRFlushedCallResult result(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    value.use();

    flushRegisters();
    callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteByVal(Node* node)
{
    JSValueOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());
    GPRFlushedCallResult result(this);

    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs keyRegs = key.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

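// Attempts to fuse the compare with an immediately following Branch node (a peephole
// optimization); returns true if the branch was consumed, false after emitting the
// non-fused comparison.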
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

#if USE(JSVALUE64)
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
    : m_gpr(jit, Reuse, operand)
{
}
#else
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
{
    if (resultWord == PayloadWord) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand);
        m_tagGPR = GPRTemporary(jit);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit, Reuse, operand);
    }
}
#endif

#if USE(JSVALUE64)
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    m_gpr = GPRTemporary(jit, Reuse, operand);
}
#else
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    if (jit->canReuse(operand.node())) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
        m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit);
    }
}
#endif

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    if (taken == nextBlock()) {
        condition = MacroAssembler::invert(condition);
        std::swap(taken, notTaken);
    }

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

1503         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1504         // so there can be no intervening nodes that also reference the compare.
1505         ASSERT(node->adjustedRefCount() == 1);
1506
1507         if (node->isBinaryUseKind(Int32Use))
1508             compilePeepHoleInt32Branch(node, branchNode, condition);
1509 #if USE(JSVALUE64)
1510         else if (node->isBinaryUseKind(Int52RepUse))
1511             compilePeepHoleInt52Branch(node, branchNode, condition);
1512 #endif // USE(JSVALUE64)
1513         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1514             // Use non-peephole comparison, for now.
1515             return false;
1516         } else if (node->isBinaryUseKind(DoubleRepUse))
1517             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1518         else if (node->op() == CompareEq) {
1519             if (node->isBinaryUseKind(BooleanUse))
1520                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1521             else if (node->isBinaryUseKind(SymbolUse))
1522                 compilePeepHoleSymbolEquality(node, branchNode);
1523             else if (node->isBinaryUseKind(ObjectUse))
1524                 compilePeepHoleObjectEquality(node, branchNode);
1525             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1526                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1527             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1528                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1529             else if (!needsTypeCheck(node->child1(), SpecOther))
1530                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1531             else if (!needsTypeCheck(node->child2(), SpecOther))
1532                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1533             else {
1534                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1535                 return true;
1536             }
1537         } else {
1538             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1539             return true;
1540         }
1541
1542         use(node->child1());
1543         use(node->child2());
1544         m_indexInBlock = branchIndexInBlock;
1545         m_currentNode = branchNode;
1546         return true;
1547     }
1548     return false;
1549 }
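// Informal example of the fusion above: for a guard like `if (a === b) { ... }`, the
// CompareEq node and the Branch node that consumes it are compiled as a single
// compare-and-branch, so the boolean result of the compare is never materialized.
// The assertion above documents that the compare has no users other than that branch,
// which is what makes skipping the intermediate value safe.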
1550
1551 void SpeculativeJIT::noticeOSRBirth(Node* node)
1552 {
1553     if (!node->hasVirtualRegister())
1554         return;
1555     
1556     VirtualRegister virtualRegister = node->virtualRegister();
1557     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1558     
1559     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1560 }
1561
1562 void SpeculativeJIT::compileMovHint(Node* node)
1563 {
1564     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1565     
1566     Node* child = node->child1().node();
1567     noticeOSRBirth(child);
1568     
1569     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1570 }
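// Note (informal): a MovHint emits no machine code. It only records, in the variable
// event stream, that the given bytecode local would now hold the child's value; the
// OSR exit machinery later replays these events to reconstruct bytecode state.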
1571
1572 void SpeculativeJIT::bail(AbortReason reason)
1573 {
1574     if (verboseCompilationEnabled())
1575         dataLog("Bailing compilation.\n");
1576     m_compileOkay = true;
1577     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1578     clearGenerationInfo();
1579 }
1580
1581 void SpeculativeJIT::compileCurrentBlock()
1582 {
1583     ASSERT(m_compileOkay);
1584     
1585     if (!m_block)
1586         return;
1587     
1588     ASSERT(m_block->isReachable);
1589     
1590     m_jit.blockHeads()[m_block->index] = m_jit.label();
1591
1592     if (!m_block->intersectionOfCFAHasVisited) {
1593         // Don't generate code for basic blocks that are unreachable according to CFA.
1594         // But to be sure that nobody has generated a jump to this block, drop in a
1595         // breakpoint here.
1596         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1597         return;
1598     }
1599
1600     m_stream->appendAndLog(VariableEvent::reset());
1601     
1602     m_jit.jitAssertHasValidCallFrame();
1603     m_jit.jitAssertTagsInPlace();
1604     m_jit.jitAssertArgumentCountSane();
1605
1606     m_state.reset();
1607     m_state.beginBasicBlock(m_block);
1608     
1609     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1610         int operand = m_block->variablesAtHead.operandForIndex(i);
1611         Node* node = m_block->variablesAtHead[i];
1612         if (!node)
1613             continue; // No need to record dead SetLocals.
1614         
1615         VariableAccessData* variable = node->variableAccessData();
1616         DataFormat format;
1617         if (!node->refCount())
1618             continue; // No need to record dead SetLocals.
1619         format = dataFormatFor(variable->flushFormat());
1620         m_stream->appendAndLog(
1621             VariableEvent::setLocal(
1622                 VirtualRegister(operand),
1623                 variable->machineLocal(),
1624                 format));
1625     }
1626
1627     m_origin = NodeOrigin();
1628     
1629     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1630         m_currentNode = m_block->at(m_indexInBlock);
1631         
1632         // We may have hit a contradiction that the CFA was aware of but that the JIT
1633         // didn't cause directly.
1634         if (!m_state.isValid()) {
1635             bail(DFGBailedAtTopOfBlock);
1636             return;
1637         }
1638
1639         m_interpreter.startExecuting();
1640         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1641         m_jit.setForNode(m_currentNode);
1642         m_origin = m_currentNode->origin;
1643         if (validationEnabled())
1644             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1645         m_lastGeneratedNode = m_currentNode->op();
1646         
1647         ASSERT(m_currentNode->shouldGenerate());
1648         
1649         if (verboseCompilationEnabled()) {
1650             dataLogF(
1651                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1652                 (int)m_currentNode->index(),
1653                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1654             dataLog("\n");
1655         }
1656
1657         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1658             m_jit.jitReleaseAssertNoException();
1659
1660         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1661
1662         compile(m_currentNode);
1663         
1664         if (belongsInMinifiedGraph(m_currentNode->op()))
1665             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1666         
1667 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1668         m_jit.clearRegisterAllocationOffsets();
1669 #endif
1670         
1671         if (!m_compileOkay) {
1672             bail(DFGBailedAtEndOfNode);
1673             return;
1674         }
1675         
1676         // Make sure that the abstract state is rematerialized for the next node.
1677         m_interpreter.executeEffects(m_indexInBlock);
1678     }
1679     
1680     // Perform the most basic verification that children have been used correctly.
1681     if (!ASSERT_DISABLED) {
1682         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1683             GenerationInfo& info = m_generationInfo[index];
1684             RELEASE_ASSERT(!info.alive());
1685         }
1686     }
1687 }
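// Rough summary of the per-node loop above: for each node we (1) bail out if the
// abstract state has become contradictory, (2) let the abstract interpreter filter
// edge types that are already known, (3) emit code for the node via compile(), and
// (4) run executeEffects() so the abstract state matches what the next node expects.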
1688
1689 // If we are making type predictions about our arguments, then
1690 // we need to check that they are correct on function entry.
1691 void SpeculativeJIT::checkArgumentTypes()
1692 {
1693     ASSERT(!m_currentNode);
1694     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1695
1696     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1697         Node* node = m_jit.graph().m_arguments[i];
1698         if (!node) {
1699             // The argument is dead. We don't do any checks for such arguments.
1700             continue;
1701         }
1702         
1703         ASSERT(node->op() == SetArgument);
1704         ASSERT(node->shouldGenerate());
1705
1706         VariableAccessData* variableAccessData = node->variableAccessData();
1707         FlushFormat format = variableAccessData->flushFormat();
1708         
1709         if (format == FlushedJSValue)
1710             continue;
1711         
1712         VirtualRegister virtualRegister = variableAccessData->local();
1713
1714         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1715         
1716 #if USE(JSVALUE64)
1717         switch (format) {
1718         case FlushedInt32: {
1719             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1720             break;
1721         }
1722         case FlushedBoolean: {
1723             GPRTemporary temp(this);
1724             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1725             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1726             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1727             break;
1728         }
1729         case FlushedCell: {
1730             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1731             break;
1732         }
1733         default:
1734             RELEASE_ASSERT_NOT_REACHED();
1735             break;
1736         }
1737 #else
1738         switch (format) {
1739         case FlushedInt32: {
1740             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1741             break;
1742         }
1743         case FlushedBoolean: {
1744             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1745             break;
1746         }
1747         case FlushedCell: {
1748             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1749             break;
1750         }
1751         default:
1752             RELEASE_ASSERT_NOT_REACHED();
1753             break;
1754         }
1755 #endif
1756     }
1757
1758     m_origin = NodeOrigin();
1759 }
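// The 64-bit checks above lean on the usual JSVALUE64 NaN-boxing scheme (described
// here informally): int32s have their top 16 bits set, so any value numerically below
// tagTypeNumberRegister is not an int32; cells have none of the tag bits set, so
// testing against tagMaskRegister rejects non-cells; and booleans differ from
// ValueFalse only in bit 0, so xor'ing with ValueFalse and testing against ~1 rejects
// everything that is not a boolean.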
1760
1761 bool SpeculativeJIT::compile()
1762 {
1763     checkArgumentTypes();
1764     
1765     ASSERT(!m_currentNode);
1766     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1767         m_jit.setForBlockIndex(blockIndex);
1768         m_block = m_jit.graph().block(blockIndex);
1769         compileCurrentBlock();
1770     }
1771     linkBranches();
1772     return true;
1773 }
1774
1775 void SpeculativeJIT::createOSREntries()
1776 {
1777     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1778         BasicBlock* block = m_jit.graph().block(blockIndex);
1779         if (!block)
1780             continue;
1781         if (!block->isOSRTarget)
1782             continue;
1783         
1784         // Currently we don't have OSR entry trampolines. We could add them
1785         // here if need be.
1786         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1787     }
1788 }
1789
1790 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1791 {
1792     unsigned osrEntryIndex = 0;
1793     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1794         BasicBlock* block = m_jit.graph().block(blockIndex);
1795         if (!block)
1796             continue;
1797         if (!block->isOSRTarget)
1798             continue;
1799         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1800     }
1801     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1802     
1803     if (verboseCompilationEnabled()) {
1804         DumpContext dumpContext;
1805         dataLog("OSR Entries:\n");
1806         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1807             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1808         if (!dumpContext.isEmpty())
1809             dumpContext.dump(WTF::dataFile());
1810     }
1811 }
1812
1813 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1814 {
1815     Edge child3 = m_jit.graph().varArgChild(node, 2);
1816     Edge child4 = m_jit.graph().varArgChild(node, 3);
1817
1818     ArrayMode arrayMode = node->arrayMode();
1819     
1820     GPRReg baseReg = base.gpr();
1821     GPRReg propertyReg = property.gpr();
1822     
1823     SpeculateDoubleOperand value(this, child3);
1824
1825     FPRReg valueReg = value.fpr();
1826     
1827     DFG_TYPE_CHECK(
1828         JSValueRegs(), child3, SpecFullRealNumber,
1829         m_jit.branchDouble(
1830             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1831     
1832     if (!m_compileOkay)
1833         return;
1834     
1835     StorageOperand storage(this, child4);
1836     GPRReg storageReg = storage.gpr();
1837
1838     if (node->op() == PutByValAlias) {
1839         // Store the value to the array.
1840         GPRReg propertyReg = property.gpr();
1841         FPRReg valueReg = value.fpr();
1842         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1843         
1844         noResult(m_currentNode);
1845         return;
1846     }
1847     
1848     GPRTemporary temporary;
1849     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1850
1851     MacroAssembler::Jump slowCase;
1852     
1853     if (arrayMode.isInBounds()) {
1854         speculationCheck(
1855             OutOfBounds, JSValueRegs(), 0,
1856             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1857     } else {
1858         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1859         
1860         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1861         
1862         if (!arrayMode.isOutOfBounds())
1863             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1864         
1865         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1866         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1867         
1868         inBounds.link(&m_jit);
1869     }
1870     
1871     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1872
1873     base.use();
1874     property.use();
1875     value.use();
1876     storage.use();
1877     
1878     if (arrayMode.isOutOfBounds()) {
1879         addSlowPathGenerator(
1880             slowPathCall(
1881                 slowCase, this,
1882                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1883                 NoResult, baseReg, propertyReg, valueReg));
1884     }
1885
1886     noResult(m_currentNode, UseChildrenCalledExplicitly);
1887 }
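// Example of what the out-of-bounds path above handles, for a double-storage array:
//     var a = [1.5];
//     a[1] = 2.5; // one past the public length
// If the index is still inside the allocated vector we store and bump the public
// length inline; only a store past the vector length takes the
// operationPutDoubleByValBeyondArrayBounds* slow path.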
1888
1889 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1890 {
1891     SpeculateCellOperand string(this, node->child1());
1892     SpeculateStrictInt32Operand index(this, node->child2());
1893     StorageOperand storage(this, node->child3());
1894
1895     GPRReg stringReg = string.gpr();
1896     GPRReg indexReg = index.gpr();
1897     GPRReg storageReg = storage.gpr();
1898     
1899     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1900
1901     // unsigned comparison so we can filter out negative indices and indices that are too large
1902     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1903
1904     GPRTemporary scratch(this);
1905     GPRReg scratchReg = scratch.gpr();
1906
1907     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1908
1909     // Load the character into scratchReg
1910     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1911
1912     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1913     JITCompiler::Jump cont8Bit = m_jit.jump();
1914
1915     is16Bit.link(&m_jit);
1916
1917     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1918
1919     cont8Bit.link(&m_jit);
1920
1921     int32Result(scratchReg, m_currentNode);
1922 }
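// Note on the 8-bit/16-bit split above: a resolved JSString's StringImpl stores its
// characters either as Latin-1 (8-bit) or as UTF-16 (16-bit), and flagIs8Bit() tells
// us which load width to use. For example, "abc".charCodeAt(1) takes the single-byte
// path and yields 98.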
1923
1924 void SpeculativeJIT::compileGetByValOnString(Node* node)
1925 {
1926     SpeculateCellOperand base(this, node->child1());
1927     SpeculateStrictInt32Operand property(this, node->child2());
1928     StorageOperand storage(this, node->child3());
1929     GPRReg baseReg = base.gpr();
1930     GPRReg propertyReg = property.gpr();
1931     GPRReg storageReg = storage.gpr();
1932
1933     GPRTemporary scratch(this);
1934     GPRReg scratchReg = scratch.gpr();
1935 #if USE(JSVALUE32_64)
1936     GPRTemporary resultTag;
1937     GPRReg resultTagReg = InvalidGPRReg;
1938     if (node->arrayMode().isOutOfBounds()) {
1939         GPRTemporary realResultTag(this);
1940         resultTag.adopt(realResultTag);
1941         resultTagReg = resultTag.gpr();
1942     }
1943 #endif
1944
1945     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1946
1947     // unsigned comparison so we can filter out negative indices and indices that are too large
1948     JITCompiler::Jump outOfBounds = m_jit.branch32(
1949         MacroAssembler::AboveOrEqual, propertyReg,
1950         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1951     if (node->arrayMode().isInBounds())
1952         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1953
1954     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1955
1956     // Load the character into scratchReg
1957     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1958
1959     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1960     JITCompiler::Jump cont8Bit = m_jit.jump();
1961
1962     is16Bit.link(&m_jit);
1963
1964     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1965
1966     JITCompiler::Jump bigCharacter =
1967         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1968
1969     // 8-bit string values don't need the isASCII check.
1970     cont8Bit.link(&m_jit);
1971
1972     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1973     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1974     m_jit.loadPtr(scratchReg, scratchReg);
1975
1976     addSlowPathGenerator(
1977         slowPathCall(
1978             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1979
1980     if (node->arrayMode().isOutOfBounds()) {
1981 #if USE(JSVALUE32_64)
1982         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1983 #endif
1984
1985         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1986         bool prototypeChainIsSane = false;
1987         if (globalObject->stringPrototypeChainIsSane()) {
1988             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1989             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1990             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1991             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1992             // indexed properties either.
1993             // https://bugs.webkit.org/show_bug.cgi?id=144668
1994             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1995             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1996             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
1997         }
1998         if (prototypeChainIsSane) {
1999             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2000             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2001             
2002 #if USE(JSVALUE64)
2003             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2004                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2005 #else
2006             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2007                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2008                 baseReg, propertyReg));
2009 #endif
2010         } else {
2011 #if USE(JSVALUE64)
2012             addSlowPathGenerator(
2013                 slowPathCall(
2014                     outOfBounds, this, operationGetByValStringInt,
2015                     scratchReg, baseReg, propertyReg));
2016 #else
2017             addSlowPathGenerator(
2018                 slowPathCall(
2019                     outOfBounds, this, operationGetByValStringInt,
2020                     resultTagReg, scratchReg, baseReg, propertyReg));
2021 #endif
2022         }
2023         
2024 #if USE(JSVALUE64)
2025         jsValueResult(scratchReg, m_currentNode);
2026 #else
2027         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2028 #endif
2029     } else
2030         cellResult(scratchReg, m_currentNode);
2031 }
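// The "sane chain" case above covers out-of-bounds string indexing such as "ab"[5]:
// as long as String.prototype and Object.prototype have no indexed properties (which
// the transition watchpoints guard), the access can simply produce undefined instead
// of performing a full generic property lookup.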
2032
2033 void SpeculativeJIT::compileFromCharCode(Node* node)
2034 {
2035     Edge& child = node->child1();
2036     if (child.useKind() == UntypedUse) {
2037         JSValueOperand opr(this, child);
2038         JSValueRegs oprRegs = opr.jsValueRegs();
2039 #if USE(JSVALUE64)
2040         GPRTemporary result(this);
2041         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2042 #else
2043         GPRTemporary resultTag(this);
2044         GPRTemporary resultPayload(this);
2045         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2046 #endif
2047         flushRegisters();
2048         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2049         m_jit.exceptionCheck();
2050         
2051         jsValueResult(resultRegs, node);
2052         return;
2053     }
2054
2055     SpeculateStrictInt32Operand property(this, child);
2056     GPRReg propertyReg = property.gpr();
2057     GPRTemporary smallStrings(this);
2058     GPRTemporary scratch(this);
2059     GPRReg scratchReg = scratch.gpr();
2060     GPRReg smallStringsReg = smallStrings.gpr();
2061
2062     JITCompiler::JumpList slowCases;
2063     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2064     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2065     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2066
2067     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2068     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2069     cellResult(scratchReg, m_currentNode);
2070 }
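// Fast path sketch for the code above: String.fromCharCode(c) for a small char code
// is served from the VM's single-character string table, e.g. String.fromCharCode(65)
// returns the cached "A". Codes at or above 0xff, or a cache entry that has not been
// materialized yet, fall back to operationStringFromCharCode.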
2071
2072 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2073 {
2074     VirtualRegister virtualRegister = node->virtualRegister();
2075     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2076
2077     switch (info.registerFormat()) {
2078     case DataFormatStorage:
2079         RELEASE_ASSERT_NOT_REACHED();
2080
2081     case DataFormatBoolean:
2082     case DataFormatCell:
2083         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2084         return GeneratedOperandTypeUnknown;
2085
2086     case DataFormatNone:
2087     case DataFormatJSCell:
2088     case DataFormatJS:
2089     case DataFormatJSBoolean:
2090     case DataFormatJSDouble:
2091         return GeneratedOperandJSValue;
2092
2093     case DataFormatJSInt32:
2094     case DataFormatInt32:
2095         return GeneratedOperandInteger;
2096
2097     default:
2098         RELEASE_ASSERT_NOT_REACHED();
2099         return GeneratedOperandTypeUnknown;
2100     }
2101 }
2102
2103 void SpeculativeJIT::compileValueToInt32(Node* node)
2104 {
2105     switch (node->child1().useKind()) {
2106 #if USE(JSVALUE64)
2107     case Int52RepUse: {
2108         SpeculateStrictInt52Operand op1(this, node->child1());
2109         GPRTemporary result(this, Reuse, op1);
2110         GPRReg op1GPR = op1.gpr();
2111         GPRReg resultGPR = result.gpr();
2112         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2113         int32Result(resultGPR, node, DataFormatInt32);
2114         return;
2115     }
2116 #endif // USE(JSVALUE64)
2117         
2118     case DoubleRepUse: {
2119         GPRTemporary result(this);
2120         SpeculateDoubleOperand op1(this, node->child1());
2121         FPRReg fpr = op1.fpr();
2122         GPRReg gpr = result.gpr();
2123         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2124         
2125         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2126         
2127         int32Result(gpr, node);
2128         return;
2129     }
2130     
2131     case NumberUse:
2132     case NotCellUse: {
2133         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2134         case GeneratedOperandInteger: {
2135             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2136             GPRTemporary result(this, Reuse, op1);
2137             m_jit.move(op1.gpr(), result.gpr());
2138             int32Result(result.gpr(), node, op1.format());
2139             return;
2140         }
2141         case GeneratedOperandJSValue: {
2142             GPRTemporary result(this);
2143 #if USE(JSVALUE64)
2144             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2145
2146             GPRReg gpr = op1.gpr();
2147             GPRReg resultGpr = result.gpr();
2148             FPRTemporary tempFpr(this);
2149             FPRReg fpr = tempFpr.fpr();
2150
2151             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2152             JITCompiler::JumpList converted;
2153
2154             if (node->child1().useKind() == NumberUse) {
2155                 DFG_TYPE_CHECK(
2156                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2157                     m_jit.branchTest64(
2158                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2159             } else {
2160                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2161                 
2162                 DFG_TYPE_CHECK(
2163                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2164                 
2165                 // It's not a cell: so true turns into 1 and all else turns into 0.
2166                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2167                 converted.append(m_jit.jump());
2168                 
2169                 isNumber.link(&m_jit);
2170             }
2171
2172             // First, if we get here we have a double encoded as a JSValue
2173             unboxDouble(gpr, resultGpr, fpr);
2174
2175             silentSpillAllRegisters(resultGpr);
2176             callOperation(operationToInt32, resultGpr, fpr);
2177             silentFillAllRegisters(resultGpr);
2178
2179             converted.append(m_jit.jump());
2180
2181             isInteger.link(&m_jit);
2182             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2183
2184             converted.link(&m_jit);
2185 #else
2186             Node* childNode = node->child1().node();
2187             VirtualRegister virtualRegister = childNode->virtualRegister();
2188             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2189
2190             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2191
2192             GPRReg payloadGPR = op1.payloadGPR();
2193             GPRReg resultGpr = result.gpr();
2194         
2195             JITCompiler::JumpList converted;
2196
2197             if (info.registerFormat() == DataFormatJSInt32)
2198                 m_jit.move(payloadGPR, resultGpr);
2199             else {
2200                 GPRReg tagGPR = op1.tagGPR();
2201                 FPRTemporary tempFpr(this);
2202                 FPRReg fpr = tempFpr.fpr();
2203                 FPRTemporary scratch(this);
2204
2205                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2206
2207                 if (node->child1().useKind() == NumberUse) {
2208                     DFG_TYPE_CHECK(
2209                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2210                         m_jit.branch32(
2211                             MacroAssembler::AboveOrEqual, tagGPR,
2212                             TrustedImm32(JSValue::LowestTag)));
2213                 } else {
2214                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2215                     
2216                     DFG_TYPE_CHECK(
2217                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2218                         m_jit.branchIfCell(op1.jsValueRegs()));
2219                     
2220                     // It's not a cell: so true turns into 1 and all else turns into 0.
2221                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2222                     m_jit.move(TrustedImm32(0), resultGpr);
2223                     converted.append(m_jit.jump());
2224                     
2225                     isBoolean.link(&m_jit);
2226                     m_jit.move(payloadGPR, resultGpr);
2227                     converted.append(m_jit.jump());
2228                     
2229                     isNumber.link(&m_jit);
2230                 }
2231
2232                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2233
2234                 silentSpillAllRegisters(resultGpr);
2235                 callOperation(operationToInt32, resultGpr, fpr);
2236                 silentFillAllRegisters(resultGpr);
2237
2238                 converted.append(m_jit.jump());
2239
2240                 isInteger.link(&m_jit);
2241                 m_jit.move(payloadGPR, resultGpr);
2242
2243                 converted.link(&m_jit);
2244             }
2245 #endif
2246             int32Result(resultGpr, node);
2247             return;
2248         }
2249         case GeneratedOperandTypeUnknown:
2250             RELEASE_ASSERT(!m_compileOkay);
2251             return;
2252         }
2253         RELEASE_ASSERT_NOT_REACHED();
2254         return;
2255     }
2256     
2257     default:
2258         ASSERT(!m_compileOkay);
2259         return;
2260     }
2261 }
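// Semantics being implemented above, informally: ValueToInt32 follows the ECMAScript
// ToInt32 conversion, so for example (4294967296 | 0) === 0 and (true | 0) === 1.
// Int32 inputs pass through, non-cell non-numbers become 0 or 1 inline, and doubles
// are either truncated directly or funneled through operationToInt32.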
2262
2263 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2264 {
2265     if (doesOverflow(node->arithMode())) {
2266         if (enableInt52()) {
2267             SpeculateInt32Operand op1(this, node->child1());
2268             GPRTemporary result(this, Reuse, op1);
2269             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2270             strictInt52Result(result.gpr(), node);
2271             return;
2272         }
2273         SpeculateInt32Operand op1(this, node->child1());
2274         FPRTemporary result(this);
2275             
2276         GPRReg inputGPR = op1.gpr();
2277         FPRReg outputFPR = result.fpr();
2278             
2279         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2280             
2281         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2282         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2283         positive.link(&m_jit);
2284             
2285         doubleResult(outputFPR, node);
2286         return;
2287     }
2288     
2289     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2290
2291     SpeculateInt32Operand op1(this, node->child1());
2292     GPRTemporary result(this);
2293
2294     m_jit.move(op1.gpr(), result.gpr());
2295
2296     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2297
2298     int32Result(result.gpr(), node, op1.format());
2299 }
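// Example for the overflow case above: a value produced as an unsigned 32-bit int,
// e.g. (0xFFFFFFFF >>> 0) === 4294967295, does not fit in a signed int32. When
// overflow is possible we widen to Int52 (when available) or to double, adding 2^32
// whenever the bit pattern is negative as a signed int32; otherwise we only check the
// sign bit and keep the value as an int32.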
2300
2301 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2302 {
2303     SpeculateDoubleOperand op1(this, node->child1());
2304     FPRTemporary scratch(this);
2305     GPRTemporary result(this);
2306     
2307     FPRReg valueFPR = op1.fpr();
2308     FPRReg scratchFPR = scratch.fpr();
2309     GPRReg resultGPR = result.gpr();
2310
2311     JITCompiler::JumpList failureCases;
2312     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2313     m_jit.branchConvertDoubleToInt32(
2314         valueFPR, resultGPR, failureCases, scratchFPR,
2315         shouldCheckNegativeZero(node->arithMode()));
2316     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2317
2318     int32Result(resultGPR, node);
2319 }
2320
2321 void SpeculativeJIT::compileDoubleRep(Node* node)
2322 {
2323     switch (node->child1().useKind()) {
2324     case RealNumberUse: {
2325         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2326         FPRTemporary result(this);
2327         
2328         JSValueRegs op1Regs = op1.jsValueRegs();
2329         FPRReg resultFPR = result.fpr();
2330         
2331 #if USE(JSVALUE64)
2332         GPRTemporary temp(this);
2333         GPRReg tempGPR = temp.gpr();
2334         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2335 #else
2336         FPRTemporary temp(this);
2337         FPRReg tempFPR = temp.fpr();
2338         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2339 #endif
2340         
2341         JITCompiler::Jump done = m_jit.branchDouble(
2342             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2343         
2344         DFG_TYPE_CHECK(
2345             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2346         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2347         
2348         done.link(&m_jit);
2349         
2350         doubleResult(resultFPR, node);
2351         return;
2352     }
2353     
2354     case NotCellUse:
2355     case NumberUse: {
2356         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2357
2358         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2359         if (isInt32Speculation(possibleTypes)) {
2360             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2361             FPRTemporary result(this);
2362             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2363             doubleResult(result.fpr(), node);
2364             return;
2365         }
2366
2367         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2368         FPRTemporary result(this);
2369
2370 #if USE(JSVALUE64)
2371         GPRTemporary temp(this);
2372
2373         GPRReg op1GPR = op1.gpr();
2374         GPRReg tempGPR = temp.gpr();
2375         FPRReg resultFPR = result.fpr();
2376         JITCompiler::JumpList done;
2377
2378         JITCompiler::Jump isInteger = m_jit.branch64(
2379             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2380
2381         if (node->child1().useKind() == NotCellUse) {
2382             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2383             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2384
2385             static const double zero = 0;
2386             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2387
2388             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2389             done.append(isNull);
2390
2391             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2392                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2393
2394             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2395             static const double one = 1;
2396             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2397             done.append(m_jit.jump());
2398             done.append(isFalse);
2399
2400             isUndefined.link(&m_jit);
2401             static const double NaN = PNaN;
2402             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2403             done.append(m_jit.jump());
2404
2405             isNumber.link(&m_jit);
2406         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2407             typeCheck(
2408                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2409                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2410         }
2411
2412         unboxDouble(op1GPR, tempGPR, resultFPR);
2413         done.append(m_jit.jump());
2414     
2415         isInteger.link(&m_jit);
2416         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2417         done.link(&m_jit);
2418 #else // USE(JSVALUE64) -> this is the 32_64 case
2419         FPRTemporary temp(this);
2420     
2421         GPRReg op1TagGPR = op1.tagGPR();
2422         GPRReg op1PayloadGPR = op1.payloadGPR();
2423         FPRReg tempFPR = temp.fpr();
2424         FPRReg resultFPR = result.fpr();
2425         JITCompiler::JumpList done;
2426     
2427         JITCompiler::Jump isInteger = m_jit.branch32(
2428             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2429
2430         if (node->child1().useKind() == NotCellUse) {
2431             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2432             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2433
2434             static const double zero = 0;
2435             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2436
2437             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2438             done.append(isNull);
2439
2440             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2441
2442             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2443             static const double one = 1;
2444             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2445             done.append(m_jit.jump());
2446             done.append(isFalse);
2447
2448             isUndefined.link(&m_jit);
2449             static const double NaN = PNaN;
2450             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2451             done.append(m_jit.jump());
2452
2453             isNumber.link(&m_jit);
2454         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2455             typeCheck(
2456                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2457                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2458         }
2459
2460         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2461         done.append(m_jit.jump());
2462     
2463         isInteger.link(&m_jit);
2464         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2465         done.link(&m_jit);
2466 #endif // USE(JSVALUE64)
2467     
2468         doubleResult(resultFPR, node);
2469         return;
2470     }
2471         
2472 #if USE(JSVALUE64)
2473     case Int52RepUse: {
2474         SpeculateStrictInt52Operand value(this, node->child1());
2475         FPRTemporary result(this);
2476         
2477         GPRReg valueGPR = value.gpr();
2478         FPRReg resultFPR = result.fpr();
2479
2480         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2481         
2482         doubleResult(resultFPR, node);
2483         return;
2484     }
2485 #endif // USE(JSVALUE64)
2486         
2487     default:
2488         RELEASE_ASSERT_NOT_REACHED();
2489         return;
2490     }
2491 }
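// The NotCellUse path above mirrors ToNumber for non-cells: null and false become
// 0.0, true becomes 1.0, and undefined becomes NaN (for example, +null === 0 while
// +undefined is NaN). Cells are excluded by the type check, and values that are
// already numbers are unboxed or converted from int32 directly.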
2492
2493 void SpeculativeJIT::compileValueRep(Node* node)
2494 {
2495     switch (node->child1().useKind()) {
2496     case DoubleRepUse: {
2497         SpeculateDoubleOperand value(this, node->child1());
2498         JSValueRegsTemporary result(this);
2499         
2500         FPRReg valueFPR = value.fpr();
2501         JSValueRegs resultRegs = result.regs();
2502         
2503         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2504         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2505         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2506         // local was purified.
2507         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2508             m_jit.purifyNaN(valueFPR);
2509
2510         boxDouble(valueFPR, resultRegs);
2511         
2512         jsValueResult(resultRegs, node);
2513         return;
2514     }
2515         
2516 #if USE(JSVALUE64)
2517     case Int52RepUse: {
2518         SpeculateStrictInt52Operand value(this, node->child1());
2519         GPRTemporary result(this);
2520         
2521         GPRReg valueGPR = value.gpr();
2522         GPRReg resultGPR = result.gpr();
2523         
2524         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2525         
2526         jsValueResult(resultGPR, node);
2527         return;
2528     }
2529 #endif // USE(JSVALUE64)
2530         
2531     default:
2532         RELEASE_ASSERT_NOT_REACHED();
2533         return;
2534     }
2535 }
2536
2537 static double clampDoubleToByte(double d)
2538 {
2539     d += 0.5;
2540     if (!(d > 0))
2541         d = 0;
2542     else if (d > 255)
2543         d = 255;
2544     return d;
2545 }
2546
2547 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2548 {
2549     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2550     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2551     jit.xorPtr(result, result);
2552     MacroAssembler::Jump clamped = jit.jump();
2553     tooBig.link(&jit);
2554     jit.move(JITCompiler::TrustedImm32(255), result);
2555     clamped.link(&jit);
2556     inBounds.link(&jit);
2557 }
2558
2559 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2560 {
2561     // Unordered compare so we pick up NaN
2562     static const double zero = 0;
2563     static const double byteMax = 255;
2564     static const double half = 0.5;
2565     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2566     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2567     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2568     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2569     
2570     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2571     // FIXME: This should probably just use a floating point round!
2572     // https://bugs.webkit.org/show_bug.cgi?id=72054
2573     jit.addDouble(source, scratch);
2574     jit.truncateDoubleToInt32(scratch, result);   
2575     MacroAssembler::Jump truncatedInt = jit.jump();
2576     
2577     tooSmall.link(&jit);
2578     jit.xorPtr(result, result);
2579     MacroAssembler::Jump zeroed = jit.jump();
2580     
2581     tooBig.link(&jit);
2582     jit.move(JITCompiler::TrustedImm32(255), result);
2583     
2584     truncatedInt.link(&jit);
2585     zeroed.link(&jit);
2586
2587 }
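// These clamp helpers implement Uint8ClampedArray-style stores, informally: values
// below 0 become 0, values above 255 become 255, NaN becomes 0, and in-range doubles
// are rounded to an integer. For example, storing 300 into a Uint8ClampedArray slot
// yields 255, and storing -1 yields 0.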
2588
2589 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2590 {
2591     if (node->op() == PutByValAlias)
2592         return JITCompiler::Jump();
2593     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2594         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2595     if (view) {
2596         uint32_t length = view->length();
2597         Node* indexNode = m_jit.graph().child(node, 1).node();
2598         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2599             return JITCompiler::Jump();
2600         return m_jit.branch32(
2601             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2602     }
2603     return m_jit.branch32(
2604         MacroAssembler::AboveOrEqual, indexGPR,
2605         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2606 }
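// Note: when both the typed-array view and the index are known at compile time and
// the index is provably within the view's length, no bounds check is emitted at all;
// otherwise the index is compared against the folded length constant, or against the
// view's length field in memory.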
2607
2608 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2609 {
2610     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2611     if (!jump.isSet())
2612         return;
2613     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2614 }
2615
2616 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2617 {
2618     JITCompiler::Jump done;
2619     if (outOfBounds.isSet()) {
2620         done = m_jit.jump();
2621         if (node->arrayMode().isInBounds())
2622             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2623         else {
2624             outOfBounds.link(&m_jit);
2625
2626             JITCompiler::Jump notWasteful = m_jit.branch32(
2627                 MacroAssembler::NotEqual,
2628                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2629                 TrustedImm32(WastefulTypedArray));
2630
2631             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2632                 MacroAssembler::Zero,
2633                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2634             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2635             notWasteful.link(&m_jit);
2636         }
2637     }
2638     return done;
2639 }
2640
2641 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2642 {
2643     ASSERT(isInt(type));
2644     
2645     SpeculateCellOperand base(this, node->child1());
2646     SpeculateStrictInt32Operand property(this, node->child2());
2647     StorageOperand storage(this, node->child3());
2648
2649     GPRReg baseReg = base.gpr();
2650     GPRReg propertyReg = property.gpr();
2651     GPRReg storageReg = storage.gpr();
2652
2653     GPRTemporary result(this);
2654     GPRReg resultReg = result.gpr();
2655
2656     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2657
2658     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2659     switch (elementSize(type)) {
2660     case 1:
2661         if (isSigned(type))
2662             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2663         else
2664             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2665         break;
2666     case 2:
2667         if (isSigned(type))
2668             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2669         else
2670             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2671         break;
2672     case 4:
2673         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2674         break;
2675     default:
2676         CRASH();
2677     }
2678     if (elementSize(type) < 4 || isSigned(type)) {
2679         int32Result(resultReg, node);
2680         return;
2681     }
2682     
2683     ASSERT(elementSize(type) == 4 && !isSigned(type));
2684     if (node->shouldSpeculateInt32()) {
2685         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2686         int32Result(resultReg, node);
2687         return;
2688     }
2689     
2690 #if USE(JSVALUE64)
2691     if (node->shouldSpeculateAnyInt()) {
2692         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2693         strictInt52Result(resultReg, node);
2694         return;
2695     }
2696 #endif
2697     
2698     FPRTemporary fresult(this);
2699     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2700     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2701     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2702     positive.link(&m_jit);
2703     doubleResult(fresult.fpr(), node);
2704 }
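// The tail above handles Uint32Array loads, whose values may not fit in an int32: for
// example, new Uint32Array([0xffffffff])[0] is 4294967295. Depending on the node's
// predictions we either speculate that the value is non-negative, widen to Int52, or
// convert to double and add 2^32 when the sign bit is set.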
2705
2706 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2707 {
2708     ASSERT(isInt(type));
2709     
2710     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2711     GPRReg storageReg = storage.gpr();
2712     
2713     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2714     
2715     GPRTemporary value;
2716     GPRReg valueGPR = InvalidGPRReg;
2717     
2718     if (valueUse->isConstant()) {
2719         JSValue jsValue = valueUse->asJSValue();
2720         if (!jsValue.isNumber()) {
2721             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2722             noResult(node);
2723             return;
2724         }
2725         double d = jsValue.asNumber();
2726         if (isClamped(type)) {
2727             ASSERT(elementSize(type) == 1);
2728             d = clampDoubleToByte(d);
2729         }
2730         GPRTemporary scratch(this);
2731         GPRReg scratchReg = scratch.gpr();
2732         m_jit.move(Imm32(toInt32(d)), scratchReg);
2733         value.adopt(scratch);
2734         valueGPR = scratchReg;
2735     } else {
2736         switch (valueUse.useKind()) {
2737         case Int32Use: {
2738             SpeculateInt32Operand valueOp(this, valueUse);
2739             GPRTemporary scratch(this);
2740             GPRReg scratchReg = scratch.gpr();
2741             m_jit.move(valueOp.gpr(), scratchReg);
2742             if (isClamped(type)) {
2743                 ASSERT(elementSize(type) == 1);
2744                 compileClampIntegerToByte(m_jit, scratchReg);
2745             }
2746             value.adopt(scratch);
2747             valueGPR = scratchReg;
2748             break;
2749         }
2750             
2751 #if USE(JSVALUE64)
2752         case Int52RepUse: {
2753             SpeculateStrictInt52Operand valueOp(this, valueUse);
2754             GPRTemporary scratch(this);
2755             GPRReg scratchReg = scratch.gpr();
2756             m_jit.move(valueOp.gpr(), scratchReg);
2757             if (isClamped(type)) {
2758                 ASSERT(elementSize(type) == 1);
2759                 MacroAssembler::Jump inBounds = m_jit.branch64(
2760                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2761                 MacroAssembler::Jump tooBig = m_jit.branch64(
2762                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2763                 m_jit.move(TrustedImm32(0), scratchReg);
2764                 MacroAssembler::Jump clamped = m_jit.jump();
2765                 tooBig.link(&m_jit);
2766                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2767                 clamped.link(&m_jit);
2768                 inBounds.link(&m_jit);
2769             }
2770             value.adopt(scratch);
2771             valueGPR = scratchReg;
2772             break;
2773         }
2774 #endif // USE(JSVALUE64)
2775             
2776         case DoubleRepUse: {
2777             if (isClamped(type)) {
2778                 ASSERT(elementSize(type) == 1);
2779                 SpeculateDoubleOperand valueOp(this, valueUse);
2780                 GPRTemporary result(this);
2781                 FPRTemporary floatScratch(this);
2782                 FPRReg fpr = valueOp.fpr();
2783                 GPRReg gpr = result.gpr();
2784                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2785                 value.adopt(result);
2786                 valueGPR = gpr;
2787             } else {
2788                 SpeculateDoubleOperand valueOp(this, valueUse);
2789                 GPRTemporary result(this);
2790                 FPRReg fpr = valueOp.fpr();
2791                 GPRReg gpr = result.gpr();
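                // Unclamped stores follow ToInt32-style conversion: NaN becomes 0, everything else
                // is truncated, with a slow-path call to operationToInt32 when the inline
                // truncation fails (e.g. the double is outside int32 range).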
2792                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2793                 m_jit.xorPtr(gpr, gpr);
2794                 MacroAssembler::Jump fixed = m_jit.jump();
2795                 notNaN.link(&m_jit);
2796                 
2797                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2798                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2799                 
2800                 addSlowPathGenerator(slowPathCall(failed, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2801                 
2802                 fixed.link(&m_jit);
2803                 value.adopt(result);
2804                 valueGPR = gpr;
2805             }
2806             break;
2807         }
2808             
2809         default:
2810             RELEASE_ASSERT_NOT_REACHED();
2811             break;
2812         }
2813     }
2814     
2815     ASSERT_UNUSED(valueGPR, valueGPR != property);
2816     ASSERT(valueGPR != base);
2817     ASSERT(valueGPR != storageReg);
2818     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2819
2820     switch (elementSize(type)) {
2821     case 1:
2822         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2823         break;
2824     case 2:
2825         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2826         break;
2827     case 4:
2828         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2829         break;
2830     default:
2831         CRASH();
2832     }
2833
2834     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2835     if (done.isSet())
2836         done.link(&m_jit);
2837     noResult(node);
2838 }
2839
2840 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2841 {
2842     ASSERT(isFloat(type));
2843     
2844     SpeculateCellOperand base(this, node->child1());
2845     SpeculateStrictInt32Operand property(this, node->child2());
2846     StorageOperand storage(this, node->child3());
2847
2848     GPRReg baseReg = base.gpr();
2849     GPRReg propertyReg = property.gpr();
2850     GPRReg storageReg = storage.gpr();
2851
2852     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2853
2854     FPRTemporary result(this);
2855     FPRReg resultReg = result.fpr();
2856     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2857     switch (elementSize(type)) {
2858     case 4:
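        // Float32 elements are widened to double, since the DFG represents all floating point
        // values as doubles.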
2859         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2860         m_jit.convertFloatToDouble(resultReg, resultReg);
2861         break;
2862     case 8: {
2863         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2864         break;
2865     }
2866     default:
2867         RELEASE_ASSERT_NOT_REACHED();
2868     }
2869     
2870     doubleResult(resultReg, node);
2871 }
2872
2873 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2874 {
2875     ASSERT(isFloat(type));
2876     
2877     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2878     GPRReg storageReg = storage.gpr();
2879     
2880     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2881     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2882
2883     SpeculateDoubleOperand valueOp(this, valueUse);
2884     FPRTemporary scratch(this);
2885     FPRReg valueFPR = valueOp.fpr();
2886     FPRReg scratchFPR = scratch.fpr();
2887
2888     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2889     
2890     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2891     
2892     switch (elementSize(type)) {
2893     case 4: {
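        // Narrow the incoming double to float in a scratch FPR so the value register keeps its
        // original double contents.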
2894         m_jit.moveDouble(valueFPR, scratchFPR);
2895         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2896         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2897         break;
2898     }
2899     case 8:
2900         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2901         break;
2902     default:
2903         RELEASE_ASSERT_NOT_REACHED();
2904     }
2905
2906     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2907     if (done.isSet())
2908         done.link(&m_jit);
2909     noResult(node);
2910 }
2911
2912 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2913 {
2914     // Check that prototype is an object.
2915     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2916     
2917     // Initialize scratchReg with the value being checked.
2918     m_jit.move(valueReg, scratchReg);
2919     
2920     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
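    // In effect this emits the OrdinaryHasInstance loop, roughly:
    //
    //     do {
    //         value = value.__proto__;
    //         if (value === prototype) return true;
    //     } while (value is a cell);
    //     return false;
    //
    // with Proxy objects diverted to operationDefaultHasInstance below.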
2921     MacroAssembler::Label loop(&m_jit);
2922     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2923         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2924     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2925     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2926     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2927 #if USE(JSVALUE64)
2928     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2929 #else
2930     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2931 #endif
2932     
2933     // No match - result is false.
2934 #if USE(JSVALUE64)
2935     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2936 #else
2937     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2938 #endif
2939     MacroAssembler::JumpList doneJumps; 
2940     doneJumps.append(m_jit.jump());
2941
2942     performDefaultHasInstance.link(&m_jit);
2943     silentSpillAllRegisters(scratchReg);
2944     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2945     silentFillAllRegisters(scratchReg);
2946     m_jit.exceptionCheck();
2947 #if USE(JSVALUE64)
2948     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2949 #endif
2950     doneJumps.append(m_jit.jump());
2951     
2952     isInstance.link(&m_jit);
2953 #if USE(JSVALUE64)
2954     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2955 #else
2956     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2957 #endif
2958     
2959     doneJumps.link(&m_jit);
2960 }
2961
2962 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2963 {
2964     SpeculateCellOperand base(this, node->child1());
2965
2966     GPRReg baseGPR = base.gpr();
2967
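    // Speculate that the type-info flag bits requested by the node are set on the cell; if the
    // masked test comes up zero, OSR exit with BadTypeInfoFlags.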
2968     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2969
2970     noResult(node);
2971 }
2972
2973 void SpeculativeJIT::compileInstanceOf(Node* node)
2974 {
2975     if (node->child1().useKind() == UntypedUse) {
2976         // It might not be a cell. Speculate less aggressively.
2977         // Or: it might only be used once (i.e. by us), so we get zero benefit
2978         // from speculating any more aggressively than we absolutely need to.
2979         
2980         JSValueOperand value(this, node->child1());
2981         SpeculateCellOperand prototype(this, node->child2());
2982         GPRTemporary scratch(this);
2983         GPRTemporary scratch2(this);
2984         
2985         GPRReg prototypeReg = prototype.gpr();
2986         GPRReg scratchReg = scratch.gpr();
2987         GPRReg scratch2Reg = scratch2.gpr();
2988         
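        // A non-cell left-hand side can never be an instance of anything, so the result is
        // simply false.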
2989         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2990         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2991         moveFalseTo(scratchReg);
2992
2993         MacroAssembler::Jump done = m_jit.jump();
2994         
2995         isCell.link(&m_jit);
2996         
2997         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2998         
2999         done.link(&m_jit);
3000
3001         blessedBooleanResult(scratchReg, node);
3002         return;
3003     }
3004     
3005     SpeculateCellOperand value(this, node->child1());
3006     SpeculateCellOperand prototype(this, node->child2());
3007     
3008     GPRTemporary scratch(this);
3009     GPRTemporary scratch2(this);
3010     
3011     GPRReg valueReg = value.gpr();
3012     GPRReg prototypeReg = prototype.gpr();
3013     GPRReg scratchReg = scratch.gpr();
3014     GPRReg scratch2Reg = scratch2.gpr();
3015     
3016     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3017
3018     blessedBooleanResult(scratchReg, node);
3019 }
3020
3021 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3022 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3023 {
3024     Edge& leftChild = node->child1();
3025     Edge& rightChild = node->child2();
3026
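    // If either operand is known not to be a number, the inline snippet could never take its
    // fast path, so flush and call the slow-path operation directly.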
3027     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3028         JSValueOperand left(this, leftChild);
3029         JSValueOperand right(this, rightChild);
3030         JSValueRegs leftRegs = left.jsValueRegs();
3031         JSValueRegs rightRegs = right.jsValueRegs();
3032 #if USE(JSVALUE64)
3033         GPRTemporary result(this);
3034         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3035 #else
3036         GPRTemporary resultTag(this);
3037         GPRTemporary resultPayload(this);
3038         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3039 #endif
3040         flushRegisters();
3041         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3042         m_jit.exceptionCheck();
3043
3044         jsValueResult(resultRegs, node);
3045         return;
3046     }
3047
3048     Optional<JSValueOperand> left;
3049     Optional<JSValueOperand> right;
3050
3051     JSValueRegs leftRegs;
3052     JSValueRegs rightRegs;
3053
3054 #if USE(JSVALUE64)
3055     GPRTemporary result(this);
3056     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3057     GPRTemporary scratch(this);
3058     GPRReg scratchGPR = scratch.gpr();
3059 #else
3060     GPRTemporary resultTag(this);
3061     GPRTemporary resultPayload(this);
3062     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3063     GPRReg scratchGPR = resultTag.gpr();
3064 #endif
3065
3066     SnippetOperand leftOperand;
3067     SnippetOperand rightOperand;
3068
3069     // The snippet generator does not support both operands being constant. If the left
3070     // operand is already const, we'll ignore the right operand's constness.
3071     if (leftChild->isInt32Constant())
3072         leftOperand.setConstInt32(leftChild->asInt32());
3073     else if (rightChild->isInt32Constant())
3074         rightOperand.setConstInt32(rightChild->asInt32());
3075
3076     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3077
3078     if (!leftOperand.isConst()) {
3079         left = JSValueOperand(this, leftChild);
3080         leftRegs = left->jsValueRegs();
3081     }
3082     if (!rightOperand.isConst()) {
3083         right = JSValueOperand(this, rightChild);
3084         rightRegs = right->jsValueRegs();
3085     }
3086
3087     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3088     gen.generateFastPath(m_jit);
3089
3090     ASSERT(gen.didEmitFastPath());
3091     gen.endJumpList().append(m_jit.jump());
3092
3093     gen.slowPathJumpList().link(&m_jit);
3094     silentSpillAllRegisters(resultRegs);
3095
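    // A constant operand never had registers allocated for it; materialize it into the result
    // registers (which are free to clobber at this point) for the slow-path call.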
3096     if (leftOperand.isConst()) {
3097         leftRegs = resultRegs;
3098         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3099     } else if (rightOperand.isConst()) {
3100         rightRegs = resultRegs;
3101         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3102     }
3103
3104     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3105
3106     silentFillAllRegisters(resultRegs);
3107     m_jit.exceptionCheck();
3108
3109     gen.endJumpList().link(&m_jit);
3110     jsValueResult(resultRegs, node);
3111 }
3112
3113 void SpeculativeJIT::compileBitwiseOp(Node* node)
3114 {
3115     NodeType op = node->op();
3116     Edge& leftChild = node->child1();
3117     Edge& rightChild = node->child2();
3118
3119     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3120         switch (op) {
3121         case BitAnd:
3122             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3123             return;
3124         case BitOr:
3125             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3126             return;
3127         case BitXor:
3128             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3129             return;
3130         default:
3131             RELEASE_ASSERT_NOT_REACHED();
3132         }
3133     }
3134
3135     if (leftChild->isInt32Constant()) {
3136         SpeculateInt32Operand op2(this, rightChild);
3137         GPRTemporary result(this, Reuse, op2);
3138
3139         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3140
3141         int32Result(result.gpr(), node);
3142
3143     } else if (rightChild->isInt32Constant()) {
3144         SpeculateInt32Operand op1(this, leftChild);
3145         GPRTemporary result(this, Reuse, op1);
3146
3147         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3148
3149         int32Result(result.gpr(), node);
3150
3151     } else {
3152         SpeculateInt32Operand op1(this, leftChild);
3153         SpeculateInt32Operand op2(this, rightChild);
3154         GPRTemporary result(this, Reuse, op1, op2);
3155         
3156         GPRReg reg1 = op1.gpr();
3157         GPRReg reg2 = op2.gpr();
3158         bitOp(op, reg1, reg2, result.gpr());
3159         
3160         int32Result(result.gpr(), node);
3161     }
3162 }
3163
3164 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3165 {
3166     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3167         ? operationValueBitRShift : operationValueBitURShift;
3168     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3169         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3170
3171     Edge& leftChild = node->child1();
3172     Edge& rightChild = node->child2();
3173
3174     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3175         JSValueOperand left(this, leftChild);
3176         JSValueOperand right(this, rightChild);
3177         JSValueRegs leftRegs = left.jsValueRegs();
3178         JSValueRegs rightRegs = right.jsValueRegs();
3179 #if USE(JSVALUE64)
3180         GPRTemporary result(this);
3181         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3182 #else
3183         GPRTemporary resultTag(this);
3184         GPRTemporary resultPayload(this);
3185         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3186 #endif
3187         flushRegisters();
3188         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3189         m_jit.exceptionCheck();
3190
3191         jsValueResult(resultRegs, node);
3192         return;
3193     }
3194
3195     Optional<JSValueOperand> left;
3196     Optional<JSValueOperand> right;
3197
3198     JSValueRegs leftRegs;
3199     JSValueRegs rightRegs;
3200
3201     FPRTemporary leftNumber(this);
3202     FPRReg leftFPR = leftNumber.fpr();
3203
3204 #if USE(JSVALUE64)
3205     GPRTemporary result(this);
3206     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3207     GPRTemporary scratch(this);
3208     GPRReg scratchGPR = scratch.gpr();
3209     FPRReg scratchFPR = InvalidFPRReg;
3210 #else
3211     GPRTemporary resultTag(this);
3212     GPRTemporary resultPayload(this);
3213     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3214     GPRReg scratchGPR = resultTag.gpr();
3215     FPRTemporary fprScratch(this);
3216     FPRReg scratchFPR = fprScratch.fpr();
3217 #endif
3218
3219     SnippetOperand leftOperand;
3220     SnippetOperand rightOperand;
3221
3222     // The snippet generator does not support both operands being constant. If the left
3223     // operand is already const, we'll ignore the right operand's constness.
3224     if (leftChild->isInt32Constant())
3225         leftOperand.setConstInt32(leftChild->asInt32());
3226     else if (rightChild->isInt32Constant())
3227         rightOperand.setConstInt32(rightChild->asInt32());
3228
3229     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3230
3231     if (!leftOperand.isConst()) {
3232         left = JSValueOperand(this, leftChild);
3233         leftRegs = left->jsValueRegs();
3234     }
3235     if (!rightOperand.isConst()) {
3236         right = JSValueOperand(this, rightChild);
3237         rightRegs = right->jsValueRegs();
3238     }
3239
3240     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3241         leftFPR, scratchGPR, scratchFPR, shiftType);
3242     gen.generateFastPath(m_jit);
3243
3244     ASSERT(gen.didEmitFastPath());
3245     gen.endJumpList().append(m_jit.jump());
3246
3247     gen.slowPathJumpList().link(&m_jit);
3248     silentSpillAllRegisters(resultRegs);
3249
3250     if (leftOperand.isConst()) {
3251         leftRegs = resultRegs;
3252         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3253     } else if (rightOperand.isConst()) {
3254         rightRegs = resultRegs;
3255         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3256     }
3257
3258     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3259
3260     silentFillAllRegisters(resultRegs);
3261     m_jit.exceptionCheck();
3262
3263     gen.endJumpList().link(&m_jit);
3264     jsValueResult(resultRegs, node);
3265     return;
3266 }
3267
3268 void SpeculativeJIT::compileShiftOp(Node* node)
3269 {
3270     NodeType op = node->op();
3271     Edge& leftChild = node->child1();
3272     Edge& rightChild = node->child2();
3273
3274     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3275         switch (op) {
3276         case BitLShift:
3277             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3278             return;
3279         case BitRShift:
3280         case BitURShift:
3281             emitUntypedRightShiftBitOp(node);
3282             return;
3283         default:
3284             RELEASE_ASSERT_NOT_REACHED();
3285         }
3286     }
3287
3288     if (rightChild->isInt32Constant()) {
3289         SpeculateInt32Operand op1(this, leftChild);
3290         GPRTemporary result(this, Reuse, op1);
3291
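        // Only the low five bits of the shift amount are meaningful in JS, so mask the constant.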
3292         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3293
3294         int32Result(result.gpr(), node);
3295     } else {
3296         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3297         SpeculateInt32Operand op1(this, leftChild);
3298         SpeculateInt32Operand op2(this, rightChild);
3299         GPRTemporary result(this, Reuse, op1);
3300
3301         GPRReg reg1 = op1.gpr();
3302         GPRReg reg2 = op2.gpr();
3303         shiftOp(op, reg1, reg2, result.gpr());
3304
3305         int32Result(result.gpr(), node);
3306     }
3307 }
3308
3309 void SpeculativeJIT::compileValueAdd(Node* node)
3310 {
3311     Edge& leftChild = node->child1();
3312     Edge& rightChild = node->child2();
3313
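    // If at least one operand is known not to be a number, ValueAdd can never be a numeric add;
    // go straight to the dedicated slow-path operation.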
3314     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3315         JSValueOperand left(this, leftChild);
3316         JSValueOperand right(this, rightChild);
3317         JSValueRegs leftRegs = left.jsValueRegs();
3318         JSValueRegs rightRegs = right.jsValueRegs();
3319 #if USE(JSVALUE64)
3320         GPRTemporary result(this);
3321         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3322 #else
3323         GPRTemporary resultTag(this);
3324         GPRTemporary resultPayload(this);
3325         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3326 #endif
3327         flushRegisters();
3328         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3329         m_jit.exceptionCheck();
3330     
3331         jsValueResult(resultRegs, node);
3332         return;
3333     }
3334
3335     Optional<JSValueOperand> left;
3336     Optional<JSValueOperand> right;
3337
3338     JSValueRegs leftRegs;
3339     JSValueRegs rightRegs;
3340
3341     FPRTemporary leftNumber(this);
3342     FPRTemporary rightNumber(this);
3343     FPRReg leftFPR = leftNumber.fpr();
3344     FPRReg rightFPR = rightNumber.fpr();
3345
3346 #if USE(JSVALUE64)
3347     GPRTemporary result(this);
3348     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3349     GPRTemporary scratch(this);
3350     GPRReg scratchGPR = scratch.gpr();
3351     FPRReg scratchFPR = InvalidFPRReg;
3352 #else
3353     GPRTemporary resultTag(this);
3354     GPRTemporary resultPayload(this);
3355     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3356     GPRReg scratchGPR = resultTag.gpr();
3357     FPRTemporary fprScratch(this);
3358     FPRReg scratchFPR = fprScratch.fpr();
3359 #endif
3360
3361     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3362     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3363
3364     // The snippet generator does not support both operands being constant. If the left
3365     // operand is already const, we'll ignore the right operand's constness.
3366     if (leftChild->isInt32Constant())
3367         leftOperand.setConstInt32(leftChild->asInt32());
3368     else if (rightChild->isInt32Constant())
3369         rightOperand.setConstInt32(rightChild->asInt32());
3370
3371     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3372
3373     if (!leftOperand.isConst()) {
3374         left = JSValueOperand(this, leftChild);
3375         leftRegs = left->jsValueRegs();
3376     }
3377     if (!rightOperand.isConst()) {
3378         right = JSValueOperand(this, rightChild);
3379         rightRegs = right->jsValueRegs();
3380     }
3381
3382     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3383         leftFPR, rightFPR, scratchGPR, scratchFPR);
3384     gen.generateFastPath(m_jit);
3385
3386     ASSERT(gen.didEmitFastPath());
3387     gen.endJumpList().append(m_jit.jump());
3388
3389     gen.slowPathJumpList().link(&m_jit);
3390
3391     silentSpillAllRegisters(resultRegs);
3392
3393     if (leftOperand.isConst()) {
3394         leftRegs = resultRegs;
3395         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3396     } else if (rightOperand.isConst()) {
3397         rightRegs = resultRegs;
3398         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3399     }
3400
3401     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3402
3403     silentFillAllRegisters(resultRegs);
3404     m_jit.exceptionCheck();
3405
3406     gen.endJumpList().link(&m_jit);
3407     jsValueResult(resultRegs, node);
3408     return;
3409 }
3410
3411 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3412 {
3413     // We could do something smarter here, but this case is currently super rare and, unless
3414     // Symbol.hasInstance becomes popular, will likely remain that way.
3415
3416     JSValueOperand value(this, node->child1());
3417     SpeculateCellOperand constructor(this, node->child2());
3418     JSValueOperand hasInstanceValue(this, node->child3());
3419     GPRTemporary result(this);
3420
3421     JSValueRegs valueRegs = value.jsValueRegs();
3422     GPRReg constructorGPR = constructor.gpr();
3423     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3424     GPRReg resultGPR = result.gpr();
3425
3426     MacroAssembler::Jump slowCase = m_jit.jump();
3427
3428     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3429
3430     unblessedBooleanResult(resultGPR, node);
3431 }
3432
3433 void SpeculativeJIT::compileIsJSArray(Node* node)
3434 {
3435     JSValueOperand value(this, node->child1());
3436     GPRFlushedCallResult result(this);
3437
3438     JSValueRegs valueRegs = value.jsValueRegs();
3439     GPRReg resultGPR = result.gpr();
3440
3441     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3442
3443     m_jit.compare8(JITCompiler::Equal,
3444         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3445         TrustedImm32(ArrayType),
3446         resultGPR);
3447     blessBoolean(resultGPR);
3448     JITCompiler::Jump done = m_jit.jump();
3449
3450     isNotCell.link(&m_jit);
3451     moveFalseTo(resultGPR);
3452
3453     done.link(&m_jit);
3454     blessedBooleanResult(resultGPR, node);
3455 }
3456
3457 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3458 {
3459     JSValueOperand value(this, node->child1());
3460     GPRFlushedCallResult result(this);
3461
3462     JSValueRegs valueRegs = value.jsValueRegs();
3463     GPRReg resultGPR = result.gpr();
3464
3465     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3466
3467     m_jit.compare8(JITCompiler::Equal,
3468         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3469         TrustedImm32(RegExpObjectType),
3470         resultGPR);
3471     blessBoolean(resultGPR);
3472     JITCompiler::Jump done = m_jit.jump();
3473
3474     isNotCell.link(&m_jit);
3475     moveFalseTo(resultGPR);
3476
3477     done.link(&m_jit);
3478     blessedBooleanResult(resultGPR, node);
3479 }
3480
3481 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3482 {
3483     JSValueOperand value(this, node->child1());
3484 #if USE(JSVALUE64)
3485     GPRTemporary result(this, Reuse, value);
3486 #else
3487     GPRTemporary result(this, Reuse, value, PayloadWord);
3488 #endif
3489
3490     JSValueRegs valueRegs = value.jsValueRegs();
3491     GPRReg resultGPR = result.gpr();
3492
3493     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3494
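    // The typed array JSTypes are contiguous, so a single unsigned range check
    // (type - Int8ArrayType <= Float64ArrayType - Int8ArrayType) covers Int8Array through
    // Float64Array.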
3495     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3496     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3497     m_jit.compare32(JITCompiler::BelowOrEqual,
3498         resultGPR,
3499         TrustedImm32(Float64ArrayType - Int8ArrayType),
3500         resultGPR);
3501     blessBoolean(resultGPR);
3502     JITCompiler::Jump done = m_jit.jump();
3503
3504     isNotCell.link(&m_jit);
3505     moveFalseTo(resultGPR);
3506
3507     done.link(&m_jit);
3508     blessedBooleanResult(resultGPR, node);
3509 }
3510
3511 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3512 {
3513     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3514     JSValueOperand value(this, node->child1());
3515 #if USE(JSVALUE64)
3516     GPRTemporary result(this, Reuse, value);
3517 #else
3518     GPRTemporary result(this, Reuse, value, PayloadWord);
3519 #endif
3520
3521     JSValueRegs valueRegs = value.jsValueRegs();
3522     GPRReg resultGPR = result.gpr();
3523
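    // Object(x) is the identity for objects. Everything else falls through to
    // operationObjectConstructor, which produces the appropriate object (a wrapper for
    // primitives, a fresh empty object for null/undefined).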
3524     MacroAssembler::JumpList slowCases;
3525     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3526     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3527     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3528
3529     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3530     cellResult(resultGPR, node);
3531 }
3532
3533 void SpeculativeJIT::compileArithAdd(Node* node)
3534 {
3535     switch (node->binaryUseKind()) {
3536     case Int32Use: {
3537         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3538
3539         if (node->child2()->isInt32Constant()) {
3540             SpeculateInt32Operand op1(this, node->child1());
3541             GPRTemporary result(this, Reuse, op1);
3542
3543             GPRReg gpr1 = op1.gpr();
3544             int32_t imm2 = node->child2()->asInt32();
3545             GPRReg gprResult = result.gpr();
3546
3547             if (!shouldCheckOverflow(node->arithMode())) {
3548                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3549                 int32Result(gprResult, node);
3550                 return;
3551             }
3552
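            // When the result clobbers op1, record a SpeculationRecovery so that OSR exit can
            // undo the immediate add and recover the original operand value.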
3553             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3554             if (gpr1 == gprResult) {
3555                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3556                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3557             } else
3558                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3559
3560             int32Result(gprResult, node);
3561             return;
3562         }
3563                 
3564         SpeculateInt32Operand op1(this, node->child1());
3565         SpeculateInt32Operand op2(this, node->child2());
3566         GPRTemporary result(this, Reuse, op1, op2);
3567
3568         GPRReg gpr1 = op1.gpr();
3569         GPRReg gpr2 = op2.gpr();
3570         GPRReg gprResult = result.gpr();
3571
3572         if (!shouldCheckOverflow(node->arithMode()))
3573             m_jit.add32(gpr1, gpr2, gprResult);
3574         else {
3575             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3576                 
3577             if (gpr1 == gprResult)
3578                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3579             else if (gpr2 == gprResult)
3580                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3581             else
3582                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3583         }
3584
3585         int32Result(gprResult, node);
3586         return;
3587     }
3588         
3589 #if USE(JSVALUE64)
3590     case Int52RepUse: {
3591         ASSERT(shouldCheckOverflow(node->arithMode()));
3592         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3593
3594         // Will we need an overflow check? If we can prove that neither input can be
3595         // Int52 then the overflow check will not be necessary.
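        // (Two operands that both fit in int32 sum to at most 33 significant bits, comfortably
        // within Int52.)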
3596         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3597             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3598             SpeculateWhicheverInt52Operand op1(this, node->child1());
3599             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3600             GPRTemporary result(this, Reuse, op1);
3601             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3602             int52Result(result.gpr(), node, op1.format());
3603             return;
3604         }
3605         
3606         SpeculateInt52Operand op1(this, node->child1());
3607         SpeculateInt52Operand op2(this, node->child2());
3608         GPRTemporary result(this);
3609         m_jit.move(op1.gpr(), result.gpr());
3610         speculationCheck(
3611             Int52Overflow, JSValueRegs(), 0,
3612             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3613         int52Result(result.gpr(), node);
3614         return;
3615     }
3616 #endif // USE(JSVALUE64)
3617     
3618     case DoubleRepUse: {
3619         SpeculateDoubleOperand op1(this, node->child1());
3620         SpeculateDoubleOperand op2(this, node->child2());
3621         FPRTemporary result(this, op1, op2);
3622
3623         FPRReg reg1 = op1.fpr();
3624         FPRReg reg2 = op2.fpr();
3625         m_jit.addDouble(reg1, reg2, result.fpr());
3626
3627         doubleResult(result.fpr(), node);
3628         return;
3629     }
3630         
3631     default:
3632         RELEASE_ASSERT_NOT_REACHED();
3633         break;
3634     }
3635 }
3636
3637 void SpeculativeJIT::compileMakeRope(Node* node)
3638 {
3639     ASSERT(node->child1().useKind() == KnownStringUse);
3640     ASSERT(node->child2().useKind() == KnownStringUse);
3641     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3642     
3643     SpeculateCellOperand op1(this, node->child1());
3644     SpeculateCellOperand op2(this, node->child2());
3645     SpeculateCellOperand op3(this, node->child3());
3646     GPRTemporary result(this);
3647     GPRTemporary allocator(this);
3648     GPRTemporary scratch(this);
3649     
3650     GPRReg opGPRs[3];
3651     unsigned numOpGPRs;
3652     opGPRs[0] = op1.gpr();
3653     opGPRs[1] = op2.gpr();
3654     if (node->child3()) {
3655         opGPRs[2] = op3.gpr();
3656         numOpGPRs = 3;
3657     } else {
3658         opGPRs[2] = InvalidGPRReg;
3659         numOpGPRs = 2;
3660     }
3661     GPRReg resultGPR = result.gpr();
3662     GPRReg allocatorGPR = allocator.gpr();
3663     GPRReg scratchGPR = scratch.gpr();
3664     
3665     JITCompiler::JumpList slowPath;
3666     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3667     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3668     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3669         
3670     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3671     for (unsigned i = 0; i < numOpGPRs; ++i)
3672         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3673     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3674         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
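    // The rope's flags are the AND of its fibers' flags (so Is8Bit survives only if every fiber
    // is 8-bit) and its length is the overflow-checked sum of the fiber lengths.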
3675     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3676     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3677     if (!ASSERT_DISABLED) {
3678         JITCompiler::Jump ok = m_jit.branch32(
3679             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3680         m_jit.abortWithReason(DFGNegativeStringLength);
3681         ok.link(&m_jit);
3682     }
3683     for (unsigned i = 1; i < numOpGPRs; ++i) {
3684         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3685         speculationCheck(
3686             Uncountable, JSValueSource(), nullptr,
3687             m_jit.branchAdd32(
3688                 JITCompiler::Overflow,
3689                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3690     }
3691     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3692     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3693     if (!ASSERT_DISABLED) {
3694         JITCompiler::Jump ok = m_jit.branch32(
3695             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3696         m_jit.abortWithReason(DFGNegativeStringLength);
3697         ok.link(&m_jit);
3698     }
3699     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3700     
3701     switch (numOpGPRs) {
3702     case 2:
3703         addSlowPathGenerator(slowPathCall(
3704             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3705         break;
3706     case 3:
3707         addSlowPathGenerator(slowPathCall(
3708             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3709         break;
3710     default:
3711         RELEASE_ASSERT_NOT_REACHED();
3712         break;
3713     }
3714         
3715     cellResult(resultGPR, node);
3716 }
3717
3718 void SpeculativeJIT::compileArithClz32(Node* node)
3719 {
3720     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3721     SpeculateInt32Operand value(this, node->child1());
3722     GPRTemporary result(this, Reuse, value);
3723     GPRReg valueReg = value.gpr();
3724     GPRReg resultReg = result.gpr();
3725     m_jit.countLeadingZeros32(valueReg, resultReg);
3726     int32Result(resultReg, node);
3727 }
3728
3729 void SpeculativeJIT::compileArithSub(Node* node)
3730 {
3731     switch (node->binaryUseKind()) {
3732     case Int32Use: {
3733         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3734         
3735         if (node->child2()->isInt32Constant()) {
3736             SpeculateInt32Operand op1(this, node->child1());
3737             int32_t imm2 = node->child2()->asInt32();
3738             GPRTemporary result(this);
3739
3740             if (!shouldCheckOverflow(node->arithMode())) {
3741                 m_jit.move(op1.gpr(), result.gpr());
3742                 m_jit.sub32(Imm32(imm2), result.gpr());
3743             } else {
3744                 GPRTemporary scratch(this);
3745                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3746             }
3747
3748             int32Result(result.gpr(), node);
3749             return;
3750         }
3751             
3752         if (node->child1()->isInt32Constant()) {
3753             int32_t imm1 = node->child1()->asInt32();
3754             SpeculateInt32Operand op2(this, node->child2());
3755             GPRTemporary result(this);
3756                 
3757             m_jit.move(Imm32(imm1), result.gpr());
3758             if (!shouldCheckOverflow(node->arithMode()))
3759                 m_jit.sub32(op2.gpr(), result.gpr());
3760             else
3761                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3762                 
3763             int32Result(result.gpr(), node);
3764             return;
3765         }
3766             
3767         SpeculateInt32Operand op1(this, node->child1());
3768         SpeculateInt32Operand op2(this, node->child2());
3769         GPRTemporary result(this);
3770
3771         if (!shouldCheckOverflow(node->arithMode())) {
3772             m_jit.move(op1.gpr(), result.gpr());
3773             m_jit.sub32(op2.gpr(), result.gpr());
3774         } else
3775             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3776
3777         int32Result(result.gpr(), node);
3778         return;
3779     }
3780         
3781 #if USE(JSVALUE64)
3782     case Int52RepUse: {
3783         ASSERT(shouldCheckOverflow(node->arithMode()));
3784         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3785
3786         // Will we need an overflow check? If we can prove that neither input can be
3787         // Int52 then the overflow check will not be necessary.
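        // (Likewise, the difference of two int32-representable values fits in 33 bits, well
        // within Int52.)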
3788         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3789             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3790             SpeculateWhicheverInt52Operand op1(this, node->child1());
3791             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3792             GPRTemporary result(this, Reuse, op1);
3793             m_jit.move(op1.gpr(), result.gpr());
3794             m_jit.sub64(op2.gpr(), result.gpr());
3795             int52Result(result.gpr(), node, op1.format());
3796             return;
3797         }
3798         
3799         SpeculateInt52Operand op1(this, node->child1());
3800         SpeculateInt52Operand op2(this, node->child2());
3801         GPRTemporary result(this);
3802         m_jit.move(op1.gpr(), result.gpr());
3803         speculationCheck(
3804             Int52Overflow, JSValueRegs(), 0,
3805             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3806         int52Result(result.gpr(), node);
3807         return;
3808     }
3809 #endif // USE(JSVALUE64)
3810
3811     case DoubleRepUse: {
3812         SpeculateDoubleOperand op1(this, node->child1());
3813         SpeculateDoubleOperand op2(this, node->child2());
3814         FPRTemporary result(this, op1);
3815
3816         FPRReg reg1 = op1.fpr();
3817         FPRReg reg2 = op2.fpr();
3818         m_jit.subDouble(reg1, reg2, result.fpr());
3819
3820         doubleResult(result.fpr(), node);
3821         return;
3822     }
3823
3824     case UntypedUse: {
3825         Edge& leftChild = node->child1();
3826         Edge& rightChild = node->child2();
3827
3828         JSValueOperand left(this, leftChild);
3829         JSValueOperand right(this, rightChild);
3830
3831         JSValueRegs leftRegs = left.jsValueRegs();
3832         JSValueRegs rightRegs = right.jsValueRegs();
3833
3834         FPRTemporary leftNumber(this);
3835         FPRTemporary rightNumber(this);
3836         FPRReg leftFPR = leftNumber.fpr();
3837         FPRReg rightFPR = rightNumber.fpr();
3838
3839 #if USE(JSVALUE64)
3840         GPRTemporary result(this);
3841         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3842         GPRTemporary scratch(this);
3843         GPRReg scratchGPR = scratch.gpr();
3844         FPRReg scratchFPR = InvalidFPRReg;
3845 #else
3846         GPRTemporary resultTag(this);
3847         GPRTemporary resultPayload(this);
3848         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3849         GPRReg scratchGPR = resultTag.gpr();
3850         FPRTemporary fprScratch(this);
3851         FPRReg scratchFPR = fprScratch.fpr();
3852 #endif
3853
3854         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3855         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3856
3857         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3858             leftFPR, rightFPR, scratchGPR, scratchFPR);
3859         gen.generateFastPath(m_jit);
3860
3861         ASSERT(gen.didEmitFastPath());
3862         gen.endJumpList().append(m_jit.jump());
3863
3864         gen.slowPathJumpList().link(&m_jit);
3865         silentSpillAllRegisters(resultRegs);
3866         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3867         silentFillAllRegisters(resultRegs);
3868         m_jit.exceptionCheck();
3869
3870         gen.endJumpList().link(&m_jit);
3871         jsValueResult(resultRegs, node);
3872         return;
3873     }
3874
3875     default:
3876         RELEASE_ASSERT_NOT_REACHED();
3877         return;
3878     }
3879 }
3880
3881 void SpeculativeJIT::compileArithNegate(Node* node)
3882 {
3883     switch (node->child1().useKind()) {
3884     case Int32Use: {
3885         SpeculateInt32Operand op1(this, node->child1());
3886         GPRTemporary result(this);
3887
3888         m_jit.move(op1.gpr(), result.gpr());
3889
3890         // Note: there is no notion of the result not being used as a number while someone
3891         // still cares about negative zero; hence there is no mode that checks -0 but not overflow.
3892         
3893         if (!shouldCheckOverflow(node->arithMode()))
3894             m_jit.neg32(result.gpr());
3895         else if (!shouldCheckNegativeZero(node->arithMode()))
3896             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3897         else {
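            // value & 0x7fffffff is zero only for 0 and INT_MIN: negating 0 would produce -0
            // (not representable as an int32) and negating INT_MIN overflows, so exit for both.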
3898             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3899             m_jit.neg32(result.gpr());
3900         }
3901
3902         int32Result(result.gpr(), node);
3903         return;
3904     }
3905
3906 #if USE(JSVALUE64)
3907     case Int52RepUse: {
3908         ASSERT(shouldCheckOverflow(node->arithMode()));
3909         
3910         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)) {
3911             SpeculateWhicheverInt52Operand op1(this, node->child1());
3912             GPRTemporary result(this);
3913             GPRReg op1GPR = op1.gpr();
3914             GPRReg resultGPR = result.gpr();
3915             m_jit.move(op1GPR, resultGPR);
3916             m_jit.neg64(resultGPR);
3917             if (shouldCheckNegativeZero(node->arithMode())) {
3918                 speculationCheck(
3919                     NegativeZero, JSValueRegs(), 0,
3920                     m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3921             }
3922             int52Result(resultGPR, node, op1.format());
3923             return;
3924         }
3925         
3926         SpeculateInt52Operand op1(this, node->child1());
3927         GPRTemporary result(this);
3928         GPRReg op1GPR = op1.gpr();
3929         GPRReg resultGPR = result.gpr();
3930         m_jit.move(op1GPR, resultGPR);
3931         speculationCheck(
3932             Int52Overflow, JSValueRegs(), 0,
3933             m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
3934         if (shouldCheckNegativeZero(node->arithMode())) {
3935             speculationCheck(
3936                 NegativeZero, JSValueRegs(), 0,
3937                 m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
3938         }
3939         int52Result(resultGPR, node);
3940         return;
3941     }
3942 #endif // USE(JSVALUE64)
3943         
3944     case DoubleRepUse: {
3945         SpeculateDoubleOperand op1(this, node->child1());
3946         FPRTemporary result(this);
3947         
3948         m_jit.negateDouble(op1.fpr(), result.fpr());
3949         
3950         doubleResult(result.fpr(), node);
3951         return;
3952     }
3953         
3954     default:
3955         RELEASE_ASSERT_NOT_REACHED();
3956         return;
3957     }
3958 }
3959 void SpeculativeJIT::compileArithMul(Node* node)
3960 {
3961     switch (node->binaryUseKind()) {
3962     case Int32Use: {
3963         if (node->child2()->isInt32Constant()) {
3964             SpeculateInt32Operand op1(this, node->child1());
3965             GPRTemporary result(this);
3966
3967             int32_t imm = node->child2()->asInt32();
3968             GPRReg op1GPR = op1.gpr();
3969             GPRReg resultGPR = result.gpr();
3970
3971             if (!shouldCheckOverflow(node->arithMode()))
3972                 m_jit.mul32(Imm32(imm), op1GPR, resultGPR);
3973             else {
3974                 speculationCheck(Overflow, JSValueRegs(), 0,
3975                     m_jit.branchMul32(MacroAssembler::Overflow, op1GPR, Imm32(imm), resultGPR));
3976             }
3977
3978             // The only ways to create negative zero with a constant operand are:
3979             // a negative op1 multiplied by a constant 0, and
3980             // a zero op1 multiplied by a negative constant.
3981             if (shouldCheckNegativeZero(node->arithMode())) {
3982                 if (!imm)
3983                     speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Signed, op1GPR));
3984                 else if (imm < 0) {
3985                     if (shouldCheckOverflow(node->arithMode()))
3986                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
3987                     else
3988                         speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, op1GPR));
3989                 }