Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
82 {
83     IndexingType indexingType = structure->indexingType();
84     bool hasIndexingHeader = hasIndexedProperties(indexingType);
85
86     unsigned inlineCapacity = structure->inlineCapacity();
87     unsigned outOfLineCapacity = structure->outOfLineCapacity();
88     
89     GPRTemporary scratch(this);
90     GPRTemporary scratch2(this);
91     GPRReg scratchGPR = scratch.gpr();
92     GPRReg scratch2GPR = scratch2.gpr();
93
94     ASSERT(vectorLength >= numElements);
95     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
96     
97     JITCompiler::JumpList slowCases;
98
99     size_t size = 0;
100     if (hasIndexingHeader)
101         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102     size += outOfLineCapacity * sizeof(JSValue);
103
104     if (size) {
105         slowCases.append(
106             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
107         if (hasIndexingHeader)
108             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
109         else
110             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
111     } else
112         m_jit.move(TrustedImmPtr(0), storageGPR);
113
114     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
118
119     if (hasIndexingHeader)
120         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121
122     // I want a slow path that also loads out the storage pointer, and that's
123     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124     // of work for a very small piece of functionality. :-/
125     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127         structure, vectorLength));
128
129     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
130 #if USE(JSVALUE64)
131         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132         for (unsigned i = numElements; i < vectorLength; ++i)
133             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
134 #else
135         EncodedValueDescriptor value;
136         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137         for (unsigned i = numElements; i < vectorLength; ++i) {
138             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
140         }
141 #endif
142     }
143     
144     if (hasIndexingHeader)
145         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
146 }
147
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
149 {
150     if (inlineCallFrame && !inlineCallFrame->isVarargs())
151         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
152     else {
153         VirtualRegister argumentCountRegister;
154         if (!inlineCallFrame)
155             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
156         else
157             argumentCountRegister = inlineCallFrame->argumentCountRegister;
158         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
159         if (!includeThis)
160             m_jit.sub32(TrustedImm32(1), lengthGPR);
161     }
162 }
163
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
165 {
166     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
167 }
168
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
170 {
171     if (origin.inlineCallFrame) {
172         if (origin.inlineCallFrame->isClosureCall) {
173             m_jit.loadPtr(
174                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
175                 calleeGPR);
176         } else {
177             m_jit.move(
178                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
179                 calleeGPR);
180         }
181     } else
182         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
183 }
184
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
186 {
187     m_jit.addPtr(
188         TrustedImm32(
189             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190         GPRInfo::callFrameRegister, startGPR);
191 }
192
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
194 {
195     if (!doOSRExitFuzzing())
196         return MacroAssembler::Jump();
197     
198     MacroAssembler::Jump result;
199     
200     m_jit.pushToSave(GPRInfo::regT0);
201     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205     unsigned at = Options::fireOSRExitFuzzAt();
206     if (at || atOrAfter) {
207         unsigned threshold;
208         MacroAssembler::RelationalCondition condition;
209         if (atOrAfter) {
210             threshold = atOrAfter;
211             condition = MacroAssembler::Below;
212         } else {
213             threshold = at;
214             condition = MacroAssembler::NotEqual;
215         }
216         MacroAssembler::Jump ok = m_jit.branch32(
217             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218         m_jit.popToRestore(GPRInfo::regT0);
219         result = m_jit.jump();
220         ok.link(&m_jit);
221     }
222     m_jit.popToRestore(GPRInfo::regT0);
223     
224     return result;
225 }
226
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
228 {
229     if (!m_compileOkay)
230         return;
231     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232     if (fuzzJump.isSet()) {
233         JITCompiler::JumpList jumpsToFail;
234         jumpsToFail.append(fuzzJump);
235         jumpsToFail.append(jumpToFail);
236         m_jit.appendExitInfo(jumpsToFail);
237     } else
238         m_jit.appendExitInfo(jumpToFail);
239     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
243 {
244     if (!m_compileOkay)
245         return;
246     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247     if (fuzzJump.isSet()) {
248         JITCompiler::JumpList myJumpsToFail;
249         myJumpsToFail.append(jumpsToFail);
250         myJumpsToFail.append(fuzzJump);
251         m_jit.appendExitInfo(myJumpsToFail);
252     } else
253         m_jit.appendExitInfo(jumpsToFail);
254     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
255 }
256
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
258 {
259     if (!m_compileOkay)
260         return OSRExitJumpPlaceholder();
261     unsigned index = m_jit.jitCode()->osrExit.size();
262     m_jit.appendExitInfo();
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264     return OSRExitJumpPlaceholder(index);
265 }
266
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
268 {
269     return speculationCheck(kind, jsValueSource, nodeUse.node());
270 }
271
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
273 {
274     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
275 }
276
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
278 {
279     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
280 }
281
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
283 {
284     if (!m_compileOkay)
285         return;
286     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287     m_jit.appendExitInfo(jumpToFail);
288     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
294 }
295
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
297 {
298     if (!m_compileOkay)
299         return;
300     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301     m_jit.jitCode()->appendOSRExit(OSRExit(
302         UncountableInvalidation, JSValueSource(),
303         m_jit.graph().methodOfGettingAValueProfileFor(node),
304         this, m_stream->size()));
305     info.m_replacementSource = m_jit.watchpointLabel();
306     ASSERT(info.m_replacementSource.isSet());
307     noResult(node);
308 }
309
310 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
311 {
312     if (!m_compileOkay)
313         return;
314     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
315     m_compileOkay = false;
316     if (verboseCompilationEnabled())
317         dataLog("Bailing compilation.\n");
318 }
319
320 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
321 {
322     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
323 }
324
325 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
326 {
327     ASSERT(needsTypeCheck(edge, typesPassedThrough));
328     m_interpreter.filter(edge, typesPassedThrough);
329     speculationCheck(exitKind, source, edge.node(), jumpToFail);
330 }
331
332 RegisterSet SpeculativeJIT::usedRegisters()
333 {
334     RegisterSet result;
335     
336     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
337         GPRReg gpr = GPRInfo::toRegister(i);
338         if (m_gprs.isInUse(gpr))
339             result.set(gpr);
340     }
341     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
342         FPRReg fpr = FPRInfo::toRegister(i);
343         if (m_fprs.isInUse(fpr))
344             result.set(fpr);
345     }
346     
347     result.merge(RegisterSet::stubUnavailableRegisters());
348     
349     return result;
350 }
351
352 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
353 {
354     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
355 }
356
357 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
358 {
359     m_slowPathLambdas.append(std::make_pair(lambda, m_origin.semantic));
360 }
361
362 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
363 {
364     for (auto& slowPathGenerator : m_slowPathGenerators) {
365         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
366         slowPathGenerator->generate(this);
367     }
368     for (auto& generatorPair : m_slowPathLambdas) {
369         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), generatorPair.second);
370         generatorPair.first();
371     }
372 }
373
374 void SpeculativeJIT::clearGenerationInfo()
375 {
376     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
377         m_generationInfo[i] = GenerationInfo();
378     m_gprs = RegisterBank<GPRInfo>();
379     m_fprs = RegisterBank<FPRInfo>();
380 }
381
382 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
383 {
384     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
385     Node* node = info.node();
386     DataFormat registerFormat = info.registerFormat();
387     ASSERT(registerFormat != DataFormatNone);
388     ASSERT(registerFormat != DataFormatDouble);
389         
390     SilentSpillAction spillAction;
391     SilentFillAction fillAction;
392         
393     if (!info.needsSpill())
394         spillAction = DoNothingForSpill;
395     else {
396 #if USE(JSVALUE64)
397         ASSERT(info.gpr() == source);
398         if (registerFormat == DataFormatInt32)
399             spillAction = Store32Payload;
400         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
401             spillAction = StorePtr;
402         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
403             spillAction = Store64;
404         else {
405             ASSERT(registerFormat & DataFormatJS);
406             spillAction = Store64;
407         }
408 #elif USE(JSVALUE32_64)
409         if (registerFormat & DataFormatJS) {
410             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
411             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
412         } else {
413             ASSERT(info.gpr() == source);
414             spillAction = Store32Payload;
415         }
416 #endif
417     }
418         
419     if (registerFormat == DataFormatInt32) {
420         ASSERT(info.gpr() == source);
421         ASSERT(isJSInt32(info.registerFormat()));
422         if (node->hasConstant()) {
423             ASSERT(node->isInt32Constant());
424             fillAction = SetInt32Constant;
425         } else
426             fillAction = Load32Payload;
427     } else if (registerFormat == DataFormatBoolean) {
428 #if USE(JSVALUE64)
429         RELEASE_ASSERT_NOT_REACHED();
430 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
431         fillAction = DoNothingForFill;
432 #endif
433 #elif USE(JSVALUE32_64)
434         ASSERT(info.gpr() == source);
435         if (node->hasConstant()) {
436             ASSERT(node->isBooleanConstant());
437             fillAction = SetBooleanConstant;
438         } else
439             fillAction = Load32Payload;
440 #endif
441     } else if (registerFormat == DataFormatCell) {
442         ASSERT(info.gpr() == source);
443         if (node->hasConstant()) {
444             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
445             node->asCell(); // To get the assertion.
446             fillAction = SetCellConstant;
447         } else {
448 #if USE(JSVALUE64)
449             fillAction = LoadPtr;
450 #else
451             fillAction = Load32Payload;
452 #endif
453         }
454     } else if (registerFormat == DataFormatStorage) {
455         ASSERT(info.gpr() == source);
456         fillAction = LoadPtr;
457     } else if (registerFormat == DataFormatInt52) {
458         if (node->hasConstant())
459             fillAction = SetInt52Constant;
460         else if (info.spillFormat() == DataFormatInt52)
461             fillAction = Load64;
462         else if (info.spillFormat() == DataFormatStrictInt52)
463             fillAction = Load64ShiftInt52Left;
464         else if (info.spillFormat() == DataFormatNone)
465             fillAction = Load64;
466         else {
467             RELEASE_ASSERT_NOT_REACHED();
468 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
469             fillAction = Load64; // Make GCC happy.
470 #endif
471         }
472     } else if (registerFormat == DataFormatStrictInt52) {
473         if (node->hasConstant())
474             fillAction = SetStrictInt52Constant;
475         else if (info.spillFormat() == DataFormatInt52)
476             fillAction = Load64ShiftInt52Right;
477         else if (info.spillFormat() == DataFormatStrictInt52)
478             fillAction = Load64;
479         else if (info.spillFormat() == DataFormatNone)
480             fillAction = Load64;
481         else {
482             RELEASE_ASSERT_NOT_REACHED();
483 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
484             fillAction = Load64; // Make GCC happy.
485 #endif
486         }
487     } else {
488         ASSERT(registerFormat & DataFormatJS);
489 #if USE(JSVALUE64)
490         ASSERT(info.gpr() == source);
491         if (node->hasConstant()) {
492             if (node->isCellConstant())
493                 fillAction = SetTrustedJSConstant;
494             else
495                 fillAction = SetJSConstant;
496         } else if (info.spillFormat() == DataFormatInt32) {
497             ASSERT(registerFormat == DataFormatJSInt32);
498             fillAction = Load32PayloadBoxInt;
499         } else
500             fillAction = Load64;
501 #else
502         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
503         if (node->hasConstant())
504             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
505         else if (info.payloadGPR() == source)
506             fillAction = Load32Payload;
507         else { // Fill the Tag
508             switch (info.spillFormat()) {
509             case DataFormatInt32:
510                 ASSERT(registerFormat == DataFormatJSInt32);
511                 fillAction = SetInt32Tag;
512                 break;
513             case DataFormatCell:
514                 ASSERT(registerFormat == DataFormatJSCell);
515                 fillAction = SetCellTag;
516                 break;
517             case DataFormatBoolean:
518                 ASSERT(registerFormat == DataFormatJSBoolean);
519                 fillAction = SetBooleanTag;
520                 break;
521             default:
522                 fillAction = Load32Tag;
523                 break;
524             }
525         }
526 #endif
527     }
528         
529     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
530 }
531     
532 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
533 {
534     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
535     Node* node = info.node();
536     ASSERT(info.registerFormat() == DataFormatDouble);
537
538     SilentSpillAction spillAction;
539     SilentFillAction fillAction;
540         
541     if (!info.needsSpill())
542         spillAction = DoNothingForSpill;
543     else {
544         ASSERT(!node->hasConstant());
545         ASSERT(info.spillFormat() == DataFormatNone);
546         ASSERT(info.fpr() == source);
547         spillAction = StoreDouble;
548     }
549         
550 #if USE(JSVALUE64)
551     if (node->hasConstant()) {
552         node->asNumber(); // To get the assertion.
553         fillAction = SetDoubleConstant;
554     } else {
555         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
556         fillAction = LoadDouble;
557     }
558 #elif USE(JSVALUE32_64)
559     ASSERT(info.registerFormat() == DataFormatDouble);
560     if (node->hasConstant()) {
561         node->asNumber(); // To get the assertion.
562         fillAction = SetDoubleConstant;
563     } else
564         fillAction = LoadDouble;
565 #endif
566
567     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
568 }
569     
570 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
571 {
572     switch (plan.spillAction()) {
573     case DoNothingForSpill:
574         break;
575     case Store32Tag:
576         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
577         break;
578     case Store32Payload:
579         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
580         break;
581     case StorePtr:
582         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
583         break;
584 #if USE(JSVALUE64)
585     case Store64:
586         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
587         break;
588 #endif
589     case StoreDouble:
590         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
591         break;
592     default:
593         RELEASE_ASSERT_NOT_REACHED();
594     }
595 }
596     
597 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
598 {
599 #if USE(JSVALUE32_64)
600     UNUSED_PARAM(canTrample);
601 #endif
602     switch (plan.fillAction()) {
603     case DoNothingForFill:
604         break;
605     case SetInt32Constant:
606         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
607         break;
608 #if USE(JSVALUE64)
609     case SetInt52Constant:
610         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
611         break;
612     case SetStrictInt52Constant:
613         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
614         break;
615 #endif // USE(JSVALUE64)
616     case SetBooleanConstant:
617         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
618         break;
619     case SetCellConstant:
620         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
621         break;
622 #if USE(JSVALUE64)
623     case SetTrustedJSConstant:
624         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
625         break;
626     case SetJSConstant:
627         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
628         break;
629     case SetDoubleConstant:
630         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
631         m_jit.move64ToDouble(canTrample, plan.fpr());
632         break;
633     case Load32PayloadBoxInt:
634         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
635         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
636         break;
637     case Load32PayloadConvertToInt52:
638         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
639         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
640         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
641         break;
642     case Load32PayloadSignExtend:
643         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
644         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
645         break;
646 #else
647     case SetJSConstantTag:
648         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
649         break;
650     case SetJSConstantPayload:
651         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
652         break;
653     case SetInt32Tag:
654         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
655         break;
656     case SetCellTag:
657         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
658         break;
659     case SetBooleanTag:
660         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
661         break;
662     case SetDoubleConstant:
663         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
664         break;
665 #endif
666     case Load32Tag:
667         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
668         break;
669     case Load32Payload:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         break;
672     case LoadPtr:
673         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
674         break;
675 #if USE(JSVALUE64)
676     case Load64:
677         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
678         break;
679     case Load64ShiftInt52Right:
680         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
681         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
682         break;
683     case Load64ShiftInt52Left:
684         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
685         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
686         break;
687 #endif
688     case LoadDouble:
689         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
690         break;
691     default:
692         RELEASE_ASSERT_NOT_REACHED();
693     }
694 }
695     
696 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
697 {
698     switch (arrayMode.arrayClass()) {
699     case Array::OriginalArray: {
700         CRASH();
701 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
702         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
703         return result;
704 #endif
705     }
706         
707     case Array::Array:
708         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
709         return m_jit.branch32(
710             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
711         
712     case Array::NonArray:
713     case Array::OriginalNonArray:
714         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
715         return m_jit.branch32(
716             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
717         
718     case Array::PossiblyArray:
719         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
720         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
721     }
722     
723     RELEASE_ASSERT_NOT_REACHED();
724     return JITCompiler::Jump();
725 }
726
727 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
728 {
729     JITCompiler::JumpList result;
730     
731     switch (arrayMode.type()) {
732     case Array::Int32:
733         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
734
735     case Array::Double:
736         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
737
738     case Array::Contiguous:
739         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
740
741     case Array::Undecided:
742         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
743
744     case Array::ArrayStorage:
745     case Array::SlowPutArrayStorage: {
746         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
747         
748         if (arrayMode.isJSArray()) {
749             if (arrayMode.isSlowPut()) {
750                 result.append(
751                     m_jit.branchTest32(
752                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
753                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
754                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
755                 result.append(
756                     m_jit.branch32(
757                         MacroAssembler::Above, tempGPR,
758                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
759                 break;
760             }
761             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
762             result.append(
763                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
764             break;
765         }
766         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
767         if (arrayMode.isSlowPut()) {
768             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
769             result.append(
770                 m_jit.branch32(
771                     MacroAssembler::Above, tempGPR,
772                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
773             break;
774         }
775         result.append(
776             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
777         break;
778     }
779     default:
780         CRASH();
781         break;
782     }
783     
784     return result;
785 }
786
787 void SpeculativeJIT::checkArray(Node* node)
788 {
789     ASSERT(node->arrayMode().isSpecific());
790     ASSERT(!node->arrayMode().doesConversion());
791     
792     SpeculateCellOperand base(this, node->child1());
793     GPRReg baseReg = base.gpr();
794     
795     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
796         noResult(m_currentNode);
797         return;
798     }
799     
800     const ClassInfo* expectedClassInfo = 0;
801     
802     switch (node->arrayMode().type()) {
803     case Array::AnyTypedArray:
804     case Array::String:
805         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
806         break;
807     case Array::Int32:
808     case Array::Double:
809     case Array::Contiguous:
810     case Array::Undecided:
811     case Array::ArrayStorage:
812     case Array::SlowPutArrayStorage: {
813         GPRTemporary temp(this);
814         GPRReg tempGPR = temp.gpr();
815         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
816         speculationCheck(
817             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
818             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
819         
820         noResult(m_currentNode);
821         return;
822     }
823     case Array::DirectArguments:
824         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
825         noResult(m_currentNode);
826         return;
827     case Array::ScopedArguments:
828         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
829         noResult(m_currentNode);
830         return;
831     default:
832         speculateCellTypeWithoutTypeFiltering(
833             node->child1(), baseReg,
834             typeForTypedArrayType(node->arrayMode().typedArrayType()));
835         noResult(m_currentNode);
836         return;
837     }
838     
839     RELEASE_ASSERT(expectedClassInfo);
840     
841     GPRTemporary temp(this);
842     GPRTemporary temp2(this);
843     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
844     speculationCheck(
845         BadType, JSValueSource::unboxedCell(baseReg), node,
846         m_jit.branchPtr(
847             MacroAssembler::NotEqual,
848             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
849             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
850     
851     noResult(m_currentNode);
852 }
853
854 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
855 {
856     ASSERT(node->arrayMode().doesConversion());
857     
858     GPRTemporary temp(this);
859     GPRTemporary structure;
860     GPRReg tempGPR = temp.gpr();
861     GPRReg structureGPR = InvalidGPRReg;
862     
863     if (node->op() != ArrayifyToStructure) {
864         GPRTemporary realStructure(this);
865         structure.adopt(realStructure);
866         structureGPR = structure.gpr();
867     }
868         
869     // We can skip all that comes next if we already have array storage.
870     MacroAssembler::JumpList slowPath;
871     
872     if (node->op() == ArrayifyToStructure) {
873         slowPath.append(m_jit.branchWeakStructure(
874             JITCompiler::NotEqual,
875             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
876             node->structure()));
877     } else {
878         m_jit.load8(
879             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
880         
881         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
882     }
883     
884     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
885         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
886     
887     noResult(m_currentNode);
888 }
889
890 void SpeculativeJIT::arrayify(Node* node)
891 {
892     ASSERT(node->arrayMode().isSpecific());
893     
894     SpeculateCellOperand base(this, node->child1());
895     
896     if (!node->child2()) {
897         arrayify(node, base.gpr(), InvalidGPRReg);
898         return;
899     }
900     
901     SpeculateInt32Operand property(this, node->child2());
902     
903     arrayify(node, base.gpr(), property.gpr());
904 }
905
906 GPRReg SpeculativeJIT::fillStorage(Edge edge)
907 {
908     VirtualRegister virtualRegister = edge->virtualRegister();
909     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
910     
911     switch (info.registerFormat()) {
912     case DataFormatNone: {
913         if (info.spillFormat() == DataFormatStorage) {
914             GPRReg gpr = allocate();
915             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
916             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
917             info.fillStorage(*m_stream, gpr);
918             return gpr;
919         }
920         
921         // Must be a cell; fill it as a cell and then return the pointer.
922         return fillSpeculateCell(edge);
923     }
924         
925     case DataFormatStorage: {
926         GPRReg gpr = info.gpr();
927         m_gprs.lock(gpr);
928         return gpr;
929     }
930         
931     default:
932         return fillSpeculateCell(edge);
933     }
934 }
935
936 void SpeculativeJIT::useChildren(Node* node)
937 {
938     if (node->flags() & NodeHasVarArgs) {
939         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
940             if (!!m_jit.graph().m_varArgChildren[childIdx])
941                 use(m_jit.graph().m_varArgChildren[childIdx]);
942         }
943     } else {
944         Edge child1 = node->child1();
945         if (!child1) {
946             ASSERT(!node->child2() && !node->child3());
947             return;
948         }
949         use(child1);
950         
951         Edge child2 = node->child2();
952         if (!child2) {
953             ASSERT(!node->child3());
954             return;
955         }
956         use(child2);
957         
958         Edge child3 = node->child3();
959         if (!child3)
960             return;
961         use(child3);
962     }
963 }
964
965 void SpeculativeJIT::compileTryGetById(Node* node)
966 {
967     switch (node->child1().useKind()) {
968     case CellUse: {
969         SpeculateCellOperand base(this, node->child1());
970         JSValueRegsTemporary result(this, Reuse, base);
971
972         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
973         JSValueRegs resultRegs = result.regs();
974
975         base.use();
976
977         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
978
979         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
980         break;
981     }
982
983     case UntypedUse: {
984         JSValueOperand base(this, node->child1());
985         JSValueRegsTemporary result(this, Reuse, base);
986
987         JSValueRegs baseRegs = base.jsValueRegs();
988         JSValueRegs resultRegs = result.regs();
989
990         base.use();
991
992         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
993
994         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
995
996         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
997         break;
998     }
999
1000     default:
1001         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1002         break;
1003     } 
1004 }
1005
1006 void SpeculativeJIT::compileIn(Node* node)
1007 {
1008     SpeculateCellOperand base(this, node->child2());
1009     GPRReg baseGPR = base.gpr();
1010     
1011     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1012         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1013             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1014             
1015             GPRTemporary result(this);
1016             GPRReg resultGPR = result.gpr();
1017
1018             use(node->child1());
1019             
1020             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1021             MacroAssembler::Label done = m_jit.label();
1022             
1023             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1024             // we can cast it to const AtomicStringImpl* safely.
1025             auto slowPath = slowPathCall(
1026                 jump.m_jump, this, operationInOptimize,
1027                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1028                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1029             
1030             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1031             stubInfo->codeOrigin = node->origin.semantic;
1032             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1033             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1034 #if USE(JSVALUE32_64)
1035             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1036             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1037 #endif
1038             stubInfo->patch.usedRegisters = usedRegisters();
1039
1040             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1041             addSlowPathGenerator(WTFMove(slowPath));
1042
1043             base.use();
1044
1045             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1046             return;
1047         }
1048     }
1049
1050     JSValueOperand key(this, node->child1());
1051     JSValueRegs regs = key.jsValueRegs();
1052         
1053     GPRFlushedCallResult result(this);
1054     GPRReg resultGPR = result.gpr();
1055         
1056     base.use();
1057     key.use();
1058         
1059     flushRegisters();
1060     callOperation(
1061         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1062         baseGPR, regs);
1063     m_jit.exceptionCheck();
1064     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1065 }
1066
1067 void SpeculativeJIT::compileDeleteById(Node* node)
1068 {
1069     JSValueOperand value(this, node->child1());
1070     GPRFlushedCallResult result(this);
1071
1072     JSValueRegs valueRegs = value.jsValueRegs();
1073     GPRReg resultGPR = result.gpr();
1074
1075     value.use();
1076
1077     flushRegisters();
1078     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1079     m_jit.exceptionCheck();
1080
1081     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1082 }
1083
1084 void SpeculativeJIT::compileDeleteByVal(Node* node)
1085 {
1086     JSValueOperand base(this, node->child1());
1087     JSValueOperand key(this, node->child2());
1088     GPRFlushedCallResult result(this);
1089
1090     JSValueRegs baseRegs = base.jsValueRegs();
1091     JSValueRegs keyRegs = key.jsValueRegs();
1092     GPRReg resultGPR = result.gpr();
1093
1094     base.use();
1095     key.use();
1096
1097     flushRegisters();
1098     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1099     m_jit.exceptionCheck();
1100
1101     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1102 }
1103
1104 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1105 {
1106     unsigned branchIndexInBlock = detectPeepHoleBranch();
1107     if (branchIndexInBlock != UINT_MAX) {
1108         Node* branchNode = m_block->at(branchIndexInBlock);
1109
1110         ASSERT(node->adjustedRefCount() == 1);
1111         
1112         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1113     
1114         m_indexInBlock = branchIndexInBlock;
1115         m_currentNode = branchNode;
1116         
1117         return true;
1118     }
1119     
1120     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1121     
1122     return false;
1123 }
1124
1125 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1126 {
1127     unsigned branchIndexInBlock = detectPeepHoleBranch();
1128     if (branchIndexInBlock != UINT_MAX) {
1129         Node* branchNode = m_block->at(branchIndexInBlock);
1130
1131         ASSERT(node->adjustedRefCount() == 1);
1132         
1133         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1134     
1135         m_indexInBlock = branchIndexInBlock;
1136         m_currentNode = branchNode;
1137         
1138         return true;
1139     }
1140     
1141     nonSpeculativeNonPeepholeStrictEq(node, invert);
1142     
1143     return false;
1144 }
1145
1146 static const char* dataFormatString(DataFormat format)
1147 {
1148     // These values correspond to the DataFormat enum.
1149     const char* strings[] = {
1150         "[  ]",
1151         "[ i]",
1152         "[ d]",
1153         "[ c]",
1154         "Err!",
1155         "Err!",
1156         "Err!",
1157         "Err!",
1158         "[J ]",
1159         "[Ji]",
1160         "[Jd]",
1161         "[Jc]",
1162         "Err!",
1163         "Err!",
1164         "Err!",
1165         "Err!",
1166     };
1167     return strings[format];
1168 }
1169
1170 void SpeculativeJIT::dump(const char* label)
1171 {
1172     if (label)
1173         dataLogF("<%s>\n", label);
1174
1175     dataLogF("  gprs:\n");
1176     m_gprs.dump();
1177     dataLogF("  fprs:\n");
1178     m_fprs.dump();
1179     dataLogF("  VirtualRegisters:\n");
1180     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1181         GenerationInfo& info = m_generationInfo[i];
1182         if (info.alive())
1183             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1184         else
1185             dataLogF("    % 3d:[__][__]", i);
1186         if (info.registerFormat() == DataFormatDouble)
1187             dataLogF(":fpr%d\n", info.fpr());
1188         else if (info.registerFormat() != DataFormatNone
1189 #if USE(JSVALUE32_64)
1190             && !(info.registerFormat() & DataFormatJS)
1191 #endif
1192             ) {
1193             ASSERT(info.gpr() != InvalidGPRReg);
1194             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1195         } else
1196             dataLogF("\n");
1197     }
1198     if (label)
1199         dataLogF("</%s>\n", label);
1200 }
1201
1202 GPRTemporary::GPRTemporary()
1203     : m_jit(0)
1204     , m_gpr(InvalidGPRReg)
1205 {
1206 }
1207
1208 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1209     : m_jit(jit)
1210     , m_gpr(InvalidGPRReg)
1211 {
1212     m_gpr = m_jit->allocate();
1213 }
1214
1215 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1216     : m_jit(jit)
1217     , m_gpr(InvalidGPRReg)
1218 {
1219     m_gpr = m_jit->allocate(specific);
1220 }
1221
1222 #if USE(JSVALUE32_64)
1223 GPRTemporary::GPRTemporary(
1224     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1225     : m_jit(jit)
1226     , m_gpr(InvalidGPRReg)
1227 {
1228     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1229         m_gpr = m_jit->reuse(op1.gpr(which));
1230     else
1231         m_gpr = m_jit->allocate();
1232 }
1233 #endif // USE(JSVALUE32_64)
1234
1235 JSValueRegsTemporary::JSValueRegsTemporary() { }
1236
1237 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1238 #if USE(JSVALUE64)
1239     : m_gpr(jit)
1240 #else
1241     : m_payloadGPR(jit)
1242     , m_tagGPR(jit)
1243 #endif
1244 {
1245 }
1246
1247 #if USE(JSVALUE64)
1248 template<typename T>
1249 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1250     : m_gpr(jit, Reuse, operand)
1251 {
1252 }
1253 #else
1254 template<typename T>
1255 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1256 {
1257     if (resultWord == PayloadWord) {
1258         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1259         m_tagGPR = GPRTemporary(jit);
1260     } else {
1261         m_payloadGPR = GPRTemporary(jit);
1262         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1263     }
1264 }
1265 #endif
1266
1267 #if USE(JSVALUE64)
1268 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1269 {
1270     m_gpr = GPRTemporary(jit, Reuse, operand);
1271 }
1272 #else
1273 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1274 {
1275     if (jit->canReuse(operand.node())) {
1276         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1277         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1278     } else {
1279         m_payloadGPR = GPRTemporary(jit);
1280         m_tagGPR = GPRTemporary(jit);
1281     }
1282 }
1283 #endif
1284
1285 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1286
1287 JSValueRegs JSValueRegsTemporary::regs()
1288 {
1289 #if USE(JSVALUE64)
1290     return JSValueRegs(m_gpr.gpr());
1291 #else
1292     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1293 #endif
1294 }
1295
1296 void GPRTemporary::adopt(GPRTemporary& other)
1297 {
1298     ASSERT(!m_jit);
1299     ASSERT(m_gpr == InvalidGPRReg);
1300     ASSERT(other.m_jit);
1301     ASSERT(other.m_gpr != InvalidGPRReg);
1302     m_jit = other.m_jit;
1303     m_gpr = other.m_gpr;
1304     other.m_jit = 0;
1305     other.m_gpr = InvalidGPRReg;
1306 }
1307
1308 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1309     : m_jit(jit)
1310     , m_fpr(InvalidFPRReg)
1311 {
1312     m_fpr = m_jit->fprAllocate();
1313 }
1314
1315 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1316     : m_jit(jit)
1317     , m_fpr(InvalidFPRReg)
1318 {
1319     if (m_jit->canReuse(op1.node()))
1320         m_fpr = m_jit->reuse(op1.fpr());
1321     else
1322         m_fpr = m_jit->fprAllocate();
1323 }
1324
1325 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1326     : m_jit(jit)
1327     , m_fpr(InvalidFPRReg)
1328 {
1329     if (m_jit->canReuse(op1.node()))
1330         m_fpr = m_jit->reuse(op1.fpr());
1331     else if (m_jit->canReuse(op2.node()))
1332         m_fpr = m_jit->reuse(op2.fpr());
1333     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1334         m_fpr = m_jit->reuse(op1.fpr());
1335     else
1336         m_fpr = m_jit->fprAllocate();
1337 }
1338
1339 #if USE(JSVALUE32_64)
1340 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1341     : m_jit(jit)
1342     , m_fpr(InvalidFPRReg)
1343 {
1344     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1345         m_fpr = m_jit->reuse(op1.fpr());
1346     else
1347         m_fpr = m_jit->fprAllocate();
1348 }
1349 #endif
1350
1351 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1352 {
1353     BasicBlock* taken = branchNode->branchData()->taken.block;
1354     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1355
1356     if (taken == nextBlock()) {
1357         condition = MacroAssembler::invert(condition);
1358         std::swap(taken, notTaken);
1359     }
1360
1361     SpeculateDoubleOperand op1(this, node->child1());
1362     SpeculateDoubleOperand op2(this, node->child2());
1363     
1364     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1365     jump(notTaken);
1366 }
1367
1368 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1369 {
1370     BasicBlock* taken = branchNode->branchData()->taken.block;
1371     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1372
1373     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1374     
1375     if (taken == nextBlock()) {
1376         condition = MacroAssembler::NotEqual;
1377         BasicBlock* tmp = taken;
1378         taken = notTaken;
1379         notTaken = tmp;
1380     }
1381
1382     SpeculateCellOperand op1(this, node->child1());
1383     SpeculateCellOperand op2(this, node->child2());
1384     
1385     GPRReg op1GPR = op1.gpr();
1386     GPRReg op2GPR = op2.gpr();
1387     
1388     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1389         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1390             speculationCheck(
1391                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1392         }
1393         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1394             speculationCheck(
1395                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1396         }
1397     } else {
1398         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1399             speculationCheck(
1400                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1401                 m_jit.branchIfNotObject(op1GPR));
1402         }
1403         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1404             m_jit.branchTest8(
1405                 MacroAssembler::NonZero, 
1406                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1407                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1408
1409         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1410             speculationCheck(
1411                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1412                 m_jit.branchIfNotObject(op2GPR));
1413         }
1414         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1415             m_jit.branchTest8(
1416                 MacroAssembler::NonZero, 
1417                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1418                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1419     }
1420
1421     branchPtr(condition, op1GPR, op2GPR, taken);
1422     jump(notTaken);
1423 }
1424
1425 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1426 {
1427     BasicBlock* taken = branchNode->branchData()->taken.block;
1428     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1429
1430     // The branch instruction will branch to the taken block.
1431     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1432     if (taken == nextBlock()) {
1433         condition = JITCompiler::invert(condition);
1434         BasicBlock* tmp = taken;
1435         taken = notTaken;
1436         notTaken = tmp;
1437     }
1438
1439     if (node->child1()->isInt32Constant()) {
1440         int32_t imm = node->child1()->asInt32();
1441         SpeculateBooleanOperand op2(this, node->child2());
1442         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1443     } else if (node->child2()->isInt32Constant()) {
1444         SpeculateBooleanOperand op1(this, node->child1());
1445         int32_t imm = node->child2()->asInt32();
1446         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1447     } else {
1448         SpeculateBooleanOperand op1(this, node->child1());
1449         SpeculateBooleanOperand op2(this, node->child2());
1450         branch32(condition, op1.gpr(), op2.gpr(), taken);
1451     }
1452
1453     jump(notTaken);
1454 }
1455
1456 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1457 {
1458     BasicBlock* taken = branchNode->branchData()->taken.block;
1459     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1460
1461     // The branch instruction will branch to the taken block.
1462     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1463     if (taken == nextBlock()) {
1464         condition = JITCompiler::invert(condition);
1465         BasicBlock* tmp = taken;
1466         taken = notTaken;
1467         notTaken = tmp;
1468     }
1469
1470     if (node->child1()->isInt32Constant()) {
1471         int32_t imm = node->child1()->asInt32();
1472         SpeculateInt32Operand op2(this, node->child2());
1473         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1474     } else if (node->child2()->isInt32Constant()) {
1475         SpeculateInt32Operand op1(this, node->child1());
1476         int32_t imm = node->child2()->asInt32();
1477         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1478     } else {
1479         SpeculateInt32Operand op1(this, node->child1());
1480         SpeculateInt32Operand op2(this, node->child2());
1481         branch32(condition, op1.gpr(), op2.gpr(), taken);
1482     }
1483
1484     jump(notTaken);
1485 }
1486
1487 // Returns true if the compare is fused with a subsequent branch.
1488 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1489 {
1490     // Fused compare & branch.
1491     unsigned branchIndexInBlock = detectPeepHoleBranch();
1492     if (branchIndexInBlock != UINT_MAX) {
1493         Node* branchNode = m_block->at(branchIndexInBlock);
1494
1495         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1496         // so there can be no intervening nodes that also reference the compare.
1497         ASSERT(node->adjustedRefCount() == 1);
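        // In other words, a compare such as CompareLess whose only user is the Branch right
        // after it is emitted as a single compare-and-branch; below we consume the branch
        // node too, by advancing m_indexInBlock/m_currentNode past it.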
1498
1499         if (node->isBinaryUseKind(Int32Use))
1500             compilePeepHoleInt32Branch(node, branchNode, condition);
1501 #if USE(JSVALUE64)
1502         else if (node->isBinaryUseKind(Int52RepUse))
1503             compilePeepHoleInt52Branch(node, branchNode, condition);
1504 #endif // USE(JSVALUE64)
1505         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1506             // Use non-peephole comparison, for now.
1507             return false;
1508         } else if (node->isBinaryUseKind(DoubleRepUse))
1509             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1510         else if (node->op() == CompareEq) {
1511             if (node->isBinaryUseKind(BooleanUse))
1512                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1513             else if (node->isBinaryUseKind(SymbolUse))
1514                 compilePeepHoleSymbolEquality(node, branchNode);
1515             else if (node->isBinaryUseKind(ObjectUse))
1516                 compilePeepHoleObjectEquality(node, branchNode);
1517             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1518                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1519             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1520                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1521             else if (!needsTypeCheck(node->child1(), SpecOther))
1522                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1523             else if (!needsTypeCheck(node->child2(), SpecOther))
1524                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1525             else {
1526                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1527                 return true;
1528             }
1529         } else {
1530             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1531             return true;
1532         }
1533
1534         use(node->child1());
1535         use(node->child2());
1536         m_indexInBlock = branchIndexInBlock;
1537         m_currentNode = branchNode;
1538         return true;
1539     }
1540     return false;
1541 }
1542
1543 void SpeculativeJIT::noticeOSRBirth(Node* node)
1544 {
1545     if (!node->hasVirtualRegister())
1546         return;
1547     
1548     VirtualRegister virtualRegister = node->virtualRegister();
1549     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1550     
1551     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1552 }
1553
1554 void SpeculativeJIT::compileMovHint(Node* node)
1555 {
1556     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1557     
1558     Node* child = node->child1().node();
1559     noticeOSRBirth(child);
1560     
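    // Roughly: a MovHint emits no code. It records in the variable event stream that the
    // bytecode local given by unlinkedLocal() now logically holds the value produced by
    // `child`, so that an OSR exit can later reconstruct that local.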
1561     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1562 }
1563
1564 void SpeculativeJIT::bail(AbortReason reason)
1565 {
1566     if (verboseCompilationEnabled())
1567         dataLog("Bailing compilation.\n");
1568     m_compileOkay = true;
1569     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1570     clearGenerationInfo();
1571 }
1572
1573 void SpeculativeJIT::compileCurrentBlock()
1574 {
1575     ASSERT(m_compileOkay);
1576     
1577     if (!m_block)
1578         return;
1579     
1580     ASSERT(m_block->isReachable);
1581     
1582     m_jit.blockHeads()[m_block->index] = m_jit.label();
1583
1584     if (!m_block->intersectionOfCFAHasVisited) {
1585         // Don't generate code for basic blocks that are unreachable according to CFA.
1586         // But to be sure that nobody has generated a jump to this block, drop in a
1587         // breakpoint here.
1588         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1589         return;
1590     }
1591
1592     m_stream->appendAndLog(VariableEvent::reset());
1593     
1594     m_jit.jitAssertHasValidCallFrame();
1595     m_jit.jitAssertTagsInPlace();
1596     m_jit.jitAssertArgumentCountSane();
1597
1598     m_state.reset();
1599     m_state.beginBasicBlock(m_block);
1600     
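    // The loop below tells the variable event stream, for each variable live at the head of
    // this block, which machine slot it was flushed to and in what format, so that an OSR
    // exit taken inside the block can recover it.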
1601     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1602         int operand = m_block->variablesAtHead.operandForIndex(i);
1603         Node* node = m_block->variablesAtHead[i];
1604         if (!node)
1605             continue; // No need to record dead SetLocal nodes.
1606         
1607         VariableAccessData* variable = node->variableAccessData();
1608         DataFormat format;
1609         if (!node->refCount())
1610             continue; // No need to record dead SetLocal nodes.
1611         format = dataFormatFor(variable->flushFormat());
1612         m_stream->appendAndLog(
1613             VariableEvent::setLocal(
1614                 VirtualRegister(operand),
1615                 variable->machineLocal(),
1616                 format));
1617     }
1618
1619     m_origin = NodeOrigin();
1620     
1621     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1622         m_currentNode = m_block->at(m_indexInBlock);
1623         
1624         // We may have hit a contradiction that the CFA was aware of but that the JIT
1625         // didn't cause directly.
1626         if (!m_state.isValid()) {
1627             bail(DFGBailedAtTopOfBlock);
1628             return;
1629         }
1630
1631         m_interpreter.startExecuting();
1632         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1633         m_jit.setForNode(m_currentNode);
1634         m_origin = m_currentNode->origin;
1635         if (validationEnabled())
1636             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1637         m_lastGeneratedNode = m_currentNode->op();
1638         
1639         ASSERT(m_currentNode->shouldGenerate());
1640         
1641         if (verboseCompilationEnabled()) {
1642             dataLogF(
1643                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1644                 (int)m_currentNode->index(),
1645                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1646             dataLog("\n");
1647         }
1648
1649         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1650             m_jit.jitReleaseAssertNoException();
1651
1652         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1653
1654         compile(m_currentNode);
1655         
1656         if (belongsInMinifiedGraph(m_currentNode->op()))
1657             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1658         
1659 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1660         m_jit.clearRegisterAllocationOffsets();
1661 #endif
1662         
1663         if (!m_compileOkay) {
1664             bail(DFGBailedAtEndOfNode);
1665             return;
1666         }
1667         
1668         // Make sure that the abstract state is rematerialized for the next node.
1669         m_interpreter.executeEffects(m_indexInBlock);
1670     }
1671     
1672     // Perform the most basic verification that children have been used correctly.
1673     if (!ASSERT_DISABLED) {
1674         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1675             GenerationInfo& info = m_generationInfo[index];
1676             RELEASE_ASSERT(!info.alive());
1677         }
1678     }
1679 }
1680
1681 // If we are making type predictions about our arguments then
1682 // we need to check that they are correct on function entry.
1683 void SpeculativeJIT::checkArgumentTypes()
1684 {
1685     ASSERT(!m_currentNode);
1686     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1687
1688     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1689         Node* node = m_jit.graph().m_arguments[i];
1690         if (!node) {
1691             // The argument is dead. We don't do any checks for such arguments.
1692             continue;
1693         }
1694         
1695         ASSERT(node->op() == SetArgument);
1696         ASSERT(node->shouldGenerate());
1697
1698         VariableAccessData* variableAccessData = node->variableAccessData();
1699         FlushFormat format = variableAccessData->flushFormat();
1700         
1701         if (format == FlushedJSValue)
1702             continue;
1703         
1704         VirtualRegister virtualRegister = variableAccessData->local();
1705
1706         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1707         
1708 #if USE(JSVALUE64)
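        // These checks lean on the 64-bit JSValue encoding: a boxed int32 is always
        // unsigned-greater-or-equal to the tag-type-number constant; a boolean is ValueFalse
        // or ValueTrue, which differ only in the low bit (hence the xor-then-test-against-~1
        // trick below); and a cell pointer has none of the tag-mask bits set.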
1709         switch (format) {
1710         case FlushedInt32: {
1711             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1712             break;
1713         }
1714         case FlushedBoolean: {
1715             GPRTemporary temp(this);
1716             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1717             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1718             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1719             break;
1720         }
1721         case FlushedCell: {
1722             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1723             break;
1724         }
1725         default:
1726             RELEASE_ASSERT_NOT_REACHED();
1727             break;
1728         }
1729 #else
1730         switch (format) {
1731         case FlushedInt32: {
1732             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1733             break;
1734         }
1735         case FlushedBoolean: {
1736             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1737             break;
1738         }
1739         case FlushedCell: {
1740             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1741             break;
1742         }
1743         default:
1744             RELEASE_ASSERT_NOT_REACHED();
1745             break;
1746         }
1747 #endif
1748     }
1749
1750     m_origin = NodeOrigin();
1751 }
1752
1753 bool SpeculativeJIT::compile()
1754 {
1755     checkArgumentTypes();
1756     
1757     ASSERT(!m_currentNode);
1758     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1759         m_jit.setForBlockIndex(blockIndex);
1760         m_block = m_jit.graph().block(blockIndex);
1761         compileCurrentBlock();
1762     }
1763     linkBranches();
1764     return true;
1765 }
1766
1767 void SpeculativeJIT::createOSREntries()
1768 {
1769     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1770         BasicBlock* block = m_jit.graph().block(blockIndex);
1771         if (!block)
1772             continue;
1773         if (!block->isOSRTarget)
1774             continue;
1775         
1776         // Currently we don't have OSR entry trampolines. We could add them
1777         // here if need be.
1778         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1779     }
1780 }
1781
1782 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1783 {
1784     unsigned osrEntryIndex = 0;
1785     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1786         BasicBlock* block = m_jit.graph().block(blockIndex);
1787         if (!block)
1788             continue;
1789         if (!block->isOSRTarget)
1790             continue;
1791         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1792     }
1793     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1794     
1795     if (verboseCompilationEnabled()) {
1796         DumpContext dumpContext;
1797         dataLog("OSR Entries:\n");
1798         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1799             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1800         if (!dumpContext.isEmpty())
1801             dumpContext.dump(WTF::dataFile());
1802     }
1803 }
1804
1805 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1806 {
1807     Edge child3 = m_jit.graph().varArgChild(node, 2);
1808     Edge child4 = m_jit.graph().varArgChild(node, 3);
1809
1810     ArrayMode arrayMode = node->arrayMode();
1811     
1812     GPRReg baseReg = base.gpr();
1813     GPRReg propertyReg = property.gpr();
1814     
1815     SpeculateDoubleOperand value(this, child3);
1816
1817     FPRReg valueReg = value.fpr();
1818     
1819     DFG_TYPE_CHECK(
1820         JSValueRegs(), child3, SpecFullRealNumber,
1821         m_jit.branchDouble(
1822             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1823     
1824     if (!m_compileOkay)
1825         return;
1826     
1827     StorageOperand storage(this, child4);
1828     GPRReg storageReg = storage.gpr();
1829
1830     if (node->op() == PutByValAlias) {
1831         // Store the value to the array.
1832         GPRReg propertyReg = property.gpr();
1833         FPRReg valueReg = value.fpr();
1834         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1835         
1836         noResult(m_currentNode);
1837         return;
1838     }
1839     
1840     GPRTemporary temporary;
1841     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1842
1843     MacroAssembler::Jump slowCase;
1844     
1845     if (arrayMode.isInBounds()) {
1846         speculationCheck(
1847             OutOfBounds, JSValueRegs(), 0,
1848             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1849     } else {
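        // The index may be at or beyond the public length. If it is also beyond the vector
        // length we need the slow path (or an OSR exit when this PutByVal was not profiled
        // as going out of bounds); otherwise this store appends, so bump the public length
        // to index + 1 before storing.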
1850         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1851         
1852         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1853         
1854         if (!arrayMode.isOutOfBounds())
1855             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1856         
1857         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1858         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1859         
1860         inBounds.link(&m_jit);
1861     }
1862     
1863     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1864
1865     base.use();
1866     property.use();
1867     value.use();
1868     storage.use();
1869     
1870     if (arrayMode.isOutOfBounds()) {
1871         addSlowPathGenerator(
1872             slowPathCall(
1873                 slowCase, this,
1874                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1875                 NoResult, baseReg, propertyReg, valueReg));
1876     }
1877
1878     noResult(m_currentNode, UseChildrenCalledExplicitly);
1879 }
1880
1881 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1882 {
1883     SpeculateCellOperand string(this, node->child1());
1884     SpeculateStrictInt32Operand index(this, node->child2());
1885     StorageOperand storage(this, node->child3());
1886
1887     GPRReg stringReg = string.gpr();
1888     GPRReg indexReg = index.gpr();
1889     GPRReg storageReg = storage.gpr();
1890     
1891     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1892
1893     // unsigned comparison so we can filter out negative indices and indices that are too large
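    // (A negative index such as -1 reads as 0xFFFFFFFF when treated as unsigned, so the
    // single AboveOrEqual test against the length rejects it too.)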
1894     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1895
1896     GPRTemporary scratch(this);
1897     GPRReg scratchReg = scratch.gpr();
1898
1899     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1900
1901     // Load the character into scratchReg
1902     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1903
1904     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1905     JITCompiler::Jump cont8Bit = m_jit.jump();
1906
1907     is16Bit.link(&m_jit);
1908
1909     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1910
1911     cont8Bit.link(&m_jit);
1912
1913     int32Result(scratchReg, m_currentNode);
1914 }
1915
1916 void SpeculativeJIT::compileGetByValOnString(Node* node)
1917 {
1918     SpeculateCellOperand base(this, node->child1());
1919     SpeculateStrictInt32Operand property(this, node->child2());
1920     StorageOperand storage(this, node->child3());
1921     GPRReg baseReg = base.gpr();
1922     GPRReg propertyReg = property.gpr();
1923     GPRReg storageReg = storage.gpr();
1924
1925     GPRTemporary scratch(this);
1926     GPRReg scratchReg = scratch.gpr();
1927 #if USE(JSVALUE32_64)
1928     GPRTemporary resultTag;
1929     GPRReg resultTagReg = InvalidGPRReg;
1930     if (node->arrayMode().isOutOfBounds()) {
1931         GPRTemporary realResultTag(this);
1932         resultTag.adopt(realResultTag);
1933         resultTagReg = resultTag.gpr();
1934     }
1935 #endif
1936
1937     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1938
1939     // unsigned comparison so we can filter out negative indices and indices that are too large
1940     JITCompiler::Jump outOfBounds = m_jit.branch32(
1941         MacroAssembler::AboveOrEqual, propertyReg,
1942         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1943     if (node->arrayMode().isInBounds())
1944         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1945
1946     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1947
1948     // Load the character into scratchReg
1949     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1950
1951     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1952     JITCompiler::Jump cont8Bit = m_jit.jump();
1953
1954     is16Bit.link(&m_jit);
1955
1956     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1957
1958     JITCompiler::Jump bigCharacter =
1959         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1960
1961     // 8-bit string values are always below 0x100, so they don't need the bigCharacter check.
1962     cont8Bit.link(&m_jit);
1963
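    // The character code (below 0x100 on this path) is scaled by the pointer size (shift by
    // 2 on 32-bit, 3 on 64-bit) and used to index the VM's single-character string table.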
1964     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1965     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1966     m_jit.loadPtr(scratchReg, scratchReg);
1967
1968     addSlowPathGenerator(
1969         slowPathCall(
1970             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1971
1972     if (node->arrayMode().isOutOfBounds()) {
1973 #if USE(JSVALUE32_64)
1974         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1975 #endif
1976
1977         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1978         bool prototypeChainIsSane = false;
1979         if (globalObject->stringPrototypeChainIsSane()) {
1980             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1981             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1982             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1983             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1984             // indexed properties either.
1985             // https://bugs.webkit.org/show_bug.cgi?id=144668
1986             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1987             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1988             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
1989         }
1990         if (prototypeChainIsSane) {
1991             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1992             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1993             
1994 #if USE(JSVALUE64)
1995             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1996                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
1997 #else
1998             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
1999                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2000                 baseReg, propertyReg));
2001 #endif
2002         } else {
2003 #if USE(JSVALUE64)
2004             addSlowPathGenerator(
2005                 slowPathCall(
2006                     outOfBounds, this, operationGetByValStringInt,
2007                     scratchReg, baseReg, propertyReg));
2008 #else
2009             addSlowPathGenerator(
2010                 slowPathCall(
2011                     outOfBounds, this, operationGetByValStringInt,
2012                     resultTagReg, scratchReg, baseReg, propertyReg));
2013 #endif
2014         }
2015         
2016 #if USE(JSVALUE64)
2017         jsValueResult(scratchReg, m_currentNode);
2018 #else
2019         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2020 #endif
2021     } else
2022         cellResult(scratchReg, m_currentNode);
2023 }
2024
2025 void SpeculativeJIT::compileFromCharCode(Node* node)
2026 {
2027     Edge& child = node->child1();
2028     if (child.useKind() == UntypedUse) {
2029         JSValueOperand opr(this, child);
2030         JSValueRegs oprRegs = opr.jsValueRegs();
2031 #if USE(JSVALUE64)
2032         GPRTemporary result(this);
2033         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2034 #else
2035         GPRTemporary resultTag(this);
2036         GPRTemporary resultPayload(this);
2037         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2038 #endif
2039         flushRegisters();
2040         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2041         m_jit.exceptionCheck();
2042         
2043         jsValueResult(resultRegs, node);
2044         return;
2045     }
2046
2047     SpeculateStrictInt32Operand property(this, child);
2048     GPRReg propertyReg = property.gpr();
2049     GPRTemporary smallStrings(this);
2050     GPRTemporary scratch(this);
2051     GPRReg scratchReg = scratch.gpr();
2052     GPRReg smallStringsReg = smallStrings.gpr();
2053
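    // Fast path: char codes below 0xff index the VM's single-character string table; larger
    // codes, or a null table entry, take the operationStringFromCharCode slow path.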
2054     JITCompiler::JumpList slowCases;
2055     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2056     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2057     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2058
2059     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2060     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2061     cellResult(scratchReg, m_currentNode);
2062 }
2063
2064 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2065 {
2066     VirtualRegister virtualRegister = node->virtualRegister();
2067     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2068
2069     switch (info.registerFormat()) {
2070     case DataFormatStorage:
2071         RELEASE_ASSERT_NOT_REACHED();
2072
2073     case DataFormatBoolean:
2074     case DataFormatCell:
2075         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2076         return GeneratedOperandTypeUnknown;
2077
2078     case DataFormatNone:
2079     case DataFormatJSCell:
2080     case DataFormatJS:
2081     case DataFormatJSBoolean:
2082     case DataFormatJSDouble:
2083         return GeneratedOperandJSValue;
2084
2085     case DataFormatJSInt32:
2086     case DataFormatInt32:
2087         return GeneratedOperandInteger;
2088
2089     default:
2090         RELEASE_ASSERT_NOT_REACHED();
2091         return GeneratedOperandTypeUnknown;
2092     }
2093 }
2094
2095 void SpeculativeJIT::compileValueToInt32(Node* node)
2096 {
2097     switch (node->child1().useKind()) {
2098 #if USE(JSVALUE64)
2099     case Int52RepUse: {
2100         SpeculateStrictInt52Operand op1(this, node->child1());
2101         GPRTemporary result(this, Reuse, op1);
2102         GPRReg op1GPR = op1.gpr();
2103         GPRReg resultGPR = result.gpr();
2104         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2105         int32Result(resultGPR, node, DataFormatInt32);
2106         return;
2107     }
2108 #endif // USE(JSVALUE64)
2109         
2110     case DoubleRepUse: {
2111         GPRTemporary result(this);
2112         SpeculateDoubleOperand op1(this, node->child1());
2113         FPRReg fpr = op1.fpr();
2114         GPRReg gpr = result.gpr();
2115         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2116         
2117         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2118         
2119         int32Result(gpr, node);
2120         return;
2121     }
2122     
2123     case NumberUse:
2124     case NotCellUse: {
2125         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2126         case GeneratedOperandInteger: {
2127             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2128             GPRTemporary result(this, Reuse, op1);
2129             m_jit.move(op1.gpr(), result.gpr());
2130             int32Result(result.gpr(), node, op1.format());
2131             return;
2132         }
2133         case GeneratedOperandJSValue: {
2134             GPRTemporary result(this);
2135 #if USE(JSVALUE64)
2136             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2137
2138             GPRReg gpr = op1.gpr();
2139             GPRReg resultGpr = result.gpr();
2140             FPRTemporary tempFpr(this);
2141             FPRReg fpr = tempFpr.fpr();
2142
2143             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2144             JITCompiler::JumpList converted;
2145
2146             if (node->child1().useKind() == NumberUse) {
2147                 DFG_TYPE_CHECK(
2148                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2149                     m_jit.branchTest64(
2150                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2151             } else {
2152                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2153                 
2154                 DFG_TYPE_CHECK(
2155                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2156                 
2157                 // It's not a cell: so true turns into 1 and all else turns into 0.
2158                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2159                 converted.append(m_jit.jump());
2160                 
2161                 isNumber.link(&m_jit);
2162             }
2163
2164             // First, if we get here we have a double encoded as a JSValue
2165             unboxDouble(gpr, resultGpr, fpr);
2166
2167             silentSpillAllRegisters(resultGpr);
2168             callOperation(toInt32, resultGpr, fpr);
2169             silentFillAllRegisters(resultGpr);
2170
2171             converted.append(m_jit.jump());
2172
2173             isInteger.link(&m_jit);
2174             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2175
2176             converted.link(&m_jit);
2177 #else
2178             Node* childNode = node->child1().node();
2179             VirtualRegister virtualRegister = childNode->virtualRegister();
2180             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2181
2182             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2183
2184             GPRReg payloadGPR = op1.payloadGPR();
2185             GPRReg resultGpr = result.gpr();
2186         
2187             JITCompiler::JumpList converted;
2188
2189             if (info.registerFormat() == DataFormatJSInt32)
2190                 m_jit.move(payloadGPR, resultGpr);
2191             else {
2192                 GPRReg tagGPR = op1.tagGPR();
2193                 FPRTemporary tempFpr(this);
2194                 FPRReg fpr = tempFpr.fpr();
2195                 FPRTemporary scratch(this);
2196
2197                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2198
2199                 if (node->child1().useKind() == NumberUse) {
2200                     DFG_TYPE_CHECK(
2201                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2202                         m_jit.branch32(
2203                             MacroAssembler::AboveOrEqual, tagGPR,
2204                             TrustedImm32(JSValue::LowestTag)));
2205                 } else {
2206                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2207                     
2208                     DFG_TYPE_CHECK(
2209                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2210                         m_jit.branchIfCell(op1.jsValueRegs()));
2211                     
2212                     // It's not a cell: so true turns into 1 and all else turns into 0.
2213                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2214                     m_jit.move(TrustedImm32(0), resultGpr);
2215                     converted.append(m_jit.jump());
2216                     
2217                     isBoolean.link(&m_jit);
2218                     m_jit.move(payloadGPR, resultGpr);
2219                     converted.append(m_jit.jump());
2220                     
2221                     isNumber.link(&m_jit);
2222                 }
2223
2224                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2225
2226                 silentSpillAllRegisters(resultGpr);
2227                 callOperation(toInt32, resultGpr, fpr);
2228                 silentFillAllRegisters(resultGpr);
2229
2230                 converted.append(m_jit.jump());
2231
2232                 isInteger.link(&m_jit);
2233                 m_jit.move(payloadGPR, resultGpr);
2234
2235                 converted.link(&m_jit);
2236             }
2237 #endif
2238             int32Result(resultGpr, node);
2239             return;
2240         }
2241         case GeneratedOperandTypeUnknown:
2242             RELEASE_ASSERT(!m_compileOkay);
2243             return;
2244         }
2245         RELEASE_ASSERT_NOT_REACHED();
2246         return;
2247     }
2248     
2249     default:
2250         ASSERT(!m_compileOkay);
2251         return;
2252     }
2253 }
2254
2255 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2256 {
2257     if (doesOverflow(node->arithMode())) {
2258         if (enableInt52()) {
2259             SpeculateInt32Operand op1(this, node->child1());
2260             GPRTemporary result(this, Reuse, op1);
2261             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2262             strictInt52Result(result.gpr(), node);
2263             return;
2264         }
2265         SpeculateInt32Operand op1(this, node->child1());
2266         FPRTemporary result(this);
2267             
2268         GPRReg inputGPR = op1.gpr();
2269         FPRReg outputFPR = result.fpr();
2270             
2271         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2272             
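        // The register really holds a uint32. If its sign bit is set, convertInt32ToDouble
        // produced a value that is 2^32 too small, so add 2^32 back: e.g. the bits 0xFFFFFFFF
        // convert to -1.0 and become 4294967295.0.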
2273         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2274         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2275         positive.link(&m_jit);
2276             
2277         doubleResult(outputFPR, node);
2278         return;
2279     }
2280     
2281     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2282
2283     SpeculateInt32Operand op1(this, node->child1());
2284     GPRTemporary result(this);
2285
2286     m_jit.move(op1.gpr(), result.gpr());
2287
2288     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2289
2290     int32Result(result.gpr(), node, op1.format());
2291 }
2292
2293 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2294 {
2295     SpeculateDoubleOperand op1(this, node->child1());
2296     FPRTemporary scratch(this);
2297     GPRTemporary result(this);
2298     
2299     FPRReg valueFPR = op1.fpr();
2300     FPRReg scratchFPR = scratch.fpr();
2301     GPRReg resultGPR = result.gpr();
2302
2303     JITCompiler::JumpList failureCases;
2304     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2305     m_jit.branchConvertDoubleToInt32(
2306         valueFPR, resultGPR, failureCases, scratchFPR,
2307         shouldCheckNegativeZero(node->arithMode()));
2308     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2309
2310     int32Result(resultGPR, node);
2311 }
2312
2313 void SpeculativeJIT::compileDoubleRep(Node* node)
2314 {
2315     switch (node->child1().useKind()) {
2316     case RealNumberUse: {
2317         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2318         FPRTemporary result(this);
2319         
2320         JSValueRegs op1Regs = op1.jsValueRegs();
2321         FPRReg resultFPR = result.fpr();
2322         
2323 #if USE(JSVALUE64)
2324         GPRTemporary temp(this);
2325         GPRReg tempGPR = temp.gpr();
2326         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2327 #else
2328         FPRTemporary temp(this);
2329         FPRReg tempFPR = temp.fpr();
2330         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2331 #endif
2332         
2333         JITCompiler::Jump done = m_jit.branchDouble(
2334             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2335         
2336         DFG_TYPE_CHECK(
2337             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2338         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2339         
2340         done.link(&m_jit);
2341         
2342         doubleResult(resultFPR, node);
2343         return;
2344     }
2345     
2346     case NotCellUse:
2347     case NumberUse: {
2348         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2349
2350         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2351         if (isInt32Speculation(possibleTypes)) {
2352             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2353             FPRTemporary result(this);
2354             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2355             doubleResult(result.fpr(), node);
2356             return;
2357         }
2358
2359         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2360         FPRTemporary result(this);
2361
2362 #if USE(JSVALUE64)
2363         GPRTemporary temp(this);
2364
2365         GPRReg op1GPR = op1.gpr();
2366         GPRReg tempGPR = temp.gpr();
2367         FPRReg resultFPR = result.fpr();
2368         JITCompiler::JumpList done;
2369
2370         JITCompiler::Jump isInteger = m_jit.branch64(
2371             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2372
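        // For NotCellUse, the non-cell, non-number values are converted inline: undefined
        // becomes NaN, null and false become 0, true becomes 1; anything else on this path
        // is a boxed double and is unboxed below.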
2373         if (node->child1().useKind() == NotCellUse) {
2374             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2375             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2376
2377             static const double zero = 0;
2378             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2379
2380             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2381             done.append(isNull);
2382
2383             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2384                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2385
2386             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2387             static const double one = 1;
2388             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2389             done.append(m_jit.jump());
2390             done.append(isFalse);
2391
2392             isUndefined.link(&m_jit);
2393             static const double NaN = PNaN;
2394             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2395             done.append(m_jit.jump());
2396
2397             isNumber.link(&m_jit);
2398         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2399             typeCheck(
2400                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2401                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2402         }
2403
2404         unboxDouble(op1GPR, tempGPR, resultFPR);
2405         done.append(m_jit.jump());
2406     
2407         isInteger.link(&m_jit);
2408         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2409         done.link(&m_jit);
2410 #else // USE(JSVALUE64) -> this is the 32_64 case
2411         FPRTemporary temp(this);
2412     
2413         GPRReg op1TagGPR = op1.tagGPR();
2414         GPRReg op1PayloadGPR = op1.payloadGPR();
2415         FPRReg tempFPR = temp.fpr();
2416         FPRReg resultFPR = result.fpr();
2417         JITCompiler::JumpList done;
2418     
2419         JITCompiler::Jump isInteger = m_jit.branch32(
2420             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2421
2422         if (node->child1().useKind() == NotCellUse) {
2423             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2424             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2425
2426             static const double zero = 0;
2427             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2428
2429             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2430             done.append(isNull);
2431
2432             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2433
2434             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2435             static const double one = 1;
2436             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2437             done.append(m_jit.jump());
2438             done.append(isFalse);
2439
2440             isUndefined.link(&m_jit);
2441             static const double NaN = PNaN;
2442             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2443             done.append(m_jit.jump());
2444
2445             isNumber.link(&m_jit);
2446         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2447             typeCheck(
2448                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2449                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2450         }
2451
2452         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2453         done.append(m_jit.jump());
2454     
2455         isInteger.link(&m_jit);
2456         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2457         done.link(&m_jit);
2458 #endif // USE(JSVALUE64)
2459     
2460         doubleResult(resultFPR, node);
2461         return;
2462     }
2463         
2464 #if USE(JSVALUE64)
2465     case Int52RepUse: {
2466         SpeculateStrictInt52Operand value(this, node->child1());
2467         FPRTemporary result(this);
2468         
2469         GPRReg valueGPR = value.gpr();
2470         FPRReg resultFPR = result.fpr();
2471
2472         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2473         
2474         doubleResult(resultFPR, node);
2475         return;
2476     }
2477 #endif // USE(JSVALUE64)
2478         
2479     default:
2480         RELEASE_ASSERT_NOT_REACHED();
2481         return;
2482     }
2483 }
2484
2485 void SpeculativeJIT::compileValueRep(Node* node)
2486 {
2487     switch (node->child1().useKind()) {
2488     case DoubleRepUse: {
2489         SpeculateDoubleOperand value(this, node->child1());
2490         JSValueRegsTemporary result(this);
2491         
2492         FPRReg valueFPR = value.fpr();
2493         JSValueRegs resultRegs = result.regs();
2494         
2495         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2496         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2497         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2498         // local was purified.
2499         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2500             m_jit.purifyNaN(valueFPR);
2501
2502         boxDouble(valueFPR, resultRegs);
2503         
2504         jsValueResult(resultRegs, node);
2505         return;
2506     }
2507         
2508 #if USE(JSVALUE64)
2509     case Int52RepUse: {
2510         SpeculateStrictInt52Operand value(this, node->child1());
2511         GPRTemporary result(this);
2512         
2513         GPRReg valueGPR = value.gpr();
2514         GPRReg resultGPR = result.gpr();
2515         
2516         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2517         
2518         jsValueResult(resultGPR, node);
2519         return;
2520     }
2521 #endif // USE(JSVALUE64)
2522         
2523     default:
2524         RELEASE_ASSERT_NOT_REACHED();
2525         return;
2526     }
2527 }
2528
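// Rounds and clamps a double to the byte range for Uint8Clamped stores of constant values:
// adding 0.5 makes the caller's toInt32() truncation round to nearest, and the result is
// clamped to [0, 255]. NaN fails the !(d > 0) test and becomes 0. For example, -3.2 and NaN
// give 0, 10.6 gives 11.1 (truncated to 11 by the caller), and 300 gives 255.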
2529 static double clampDoubleToByte(double d)
2530 {
2531     d += 0.5;
2532     if (!(d > 0))
2533         d = 0;
2534     else if (d > 255)
2535         d = 255;
2536     return d;
2537 }
2538
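// Clamps an int32 already in `result` to [0, 255]: in-range values pass the unsigned
// BelowOrEqual check unchanged, values above 255 become 255, and the remaining (negative)
// values are zeroed.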
2539 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2540 {
2541     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2542     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2543     jit.xorPtr(result, result);
2544     MacroAssembler::Jump clamped = jit.jump();
2545     tooBig.link(&jit);
2546     jit.move(JITCompiler::TrustedImm32(255), result);
2547     clamped.link(&jit);
2548     inBounds.link(&jit);
2549 }
2550
2551 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2552 {
2553     // Unordered compare so we pick up NaN
2554     static const double zero = 0;
2555     static const double byteMax = 255;
2556     static const double half = 0.5;
2557     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2558     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2559     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2560     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2561     
2562     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2563     // FIXME: This should probably just use a floating point round!
2564     // https://bugs.webkit.org/show_bug.cgi?id=72054
2565     jit.addDouble(source, scratch);
2566     jit.truncateDoubleToInt32(scratch, result);   
2567     MacroAssembler::Jump truncatedInt = jit.jump();
2568     
2569     tooSmall.link(&jit);
2570     jit.xorPtr(result, result);
2571     MacroAssembler::Jump zeroed = jit.jump();
2572     
2573     tooBig.link(&jit);
2574     jit.move(JITCompiler::TrustedImm32(255), result);
2575     
2576     truncatedInt.link(&jit);
2577     zeroed.link(&jit);
2578
2579 }
2580
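// Returns an unset Jump when no bounds check is needed: a PutByValAlias has already been
// checked, and a constant index provably below a foldable view's length needs no check at
// all. Otherwise the index is compared, unsigned, against the view's length (taken as an
// immediate when the view is foldable, or loaded from the base otherwise).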
2581 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2582 {
2583     if (node->op() == PutByValAlias)
2584         return JITCompiler::Jump();
2585     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2586         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2587     if (view) {
2588         uint32_t length = view->length();
2589         Node* indexNode = m_jit.graph().child(node, 1).node();
2590         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2591             return JITCompiler::Jump();
2592         return m_jit.branch32(
2593             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2594     }
2595     return m_jit.branch32(
2596         MacroAssembler::AboveOrEqual, indexGPR,
2597         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2598 }
2599
2600 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2601 {
2602     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2603     if (!jump.isSet())
2604         return;
2605     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2606 }
2607
2608 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2609 {
2610     ASSERT(isInt(type));
2611     
2612     SpeculateCellOperand base(this, node->child1());
2613     SpeculateStrictInt32Operand property(this, node->child2());
2614     StorageOperand storage(this, node->child3());
2615
2616     GPRReg baseReg = base.gpr();
2617     GPRReg propertyReg = property.gpr();
2618     GPRReg storageReg = storage.gpr();
2619
2620     GPRTemporary result(this);
2621     GPRReg resultReg = result.gpr();
2622
2623     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2624
2625     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2626     switch (elementSize(type)) {
2627     case 1:
2628         if (isSigned(type))
2629             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2630         else
2631             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2632         break;
2633     case 2:
2634         if (isSigned(type))
2635             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2636         else
2637             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2638         break;
2639     case 4:
2640         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2641         break;
2642     default:
2643         CRASH();
2644     }
2645     if (elementSize(type) < 4 || isSigned(type)) {
2646         int32Result(resultReg, node);
2647         return;
2648     }
2649     
2650     ASSERT(elementSize(type) == 4 && !isSigned(type));
2651     if (node->shouldSpeculateInt32()) {
2652         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2653         int32Result(resultReg, node);
2654         return;
2655     }
2656     
2657 #if USE(JSVALUE64)
2658     if (node->shouldSpeculateAnyInt()) {
2659         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2660         strictInt52Result(resultReg, node);
2661         return;
2662     }
2663 #endif
2664     
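    // The unsigned 32-bit value may not fit an int32 speculation, so hand back a double,
    // adding 2^32 for bit patterns whose sign bit is set (the same fixup as in
    // compileUInt32ToNumber).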
2665     FPRTemporary fresult(this);
2666     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2667     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2668     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2669     positive.link(&m_jit);
2670     doubleResult(fresult.fpr(), node);
2671 }
2672
2673 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2674 {
2675     ASSERT(isInt(type));
2676     
2677     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2678     GPRReg storageReg = storage.gpr();
2679     
2680     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2681     
2682     GPRTemporary value;
2683     GPRReg valueGPR = InvalidGPRReg;
2684     
2685     if (valueUse->isConstant()) {
2686         JSValue jsValue = valueUse->asJSValue();
2687         if (!jsValue.isNumber()) {
2688             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2689             noResult(node);
2690             return;
2691         }
2692         double d = jsValue.asNumber();
2693         if (isClamped(type)) {
2694             ASSERT(elementSize(type) == 1);
2695             d = clampDoubleToByte(d);
2696         }
2697         GPRTemporary scratch(this);
2698         GPRReg scratchReg = scratch.gpr();
2699         m_jit.move(Imm32(toInt32(d)), scratchReg);
2700         value.adopt(scratch);
2701         valueGPR = scratchReg;
2702     } else {
2703         switch (valueUse.useKind()) {
2704         case Int32Use: {
2705             SpeculateInt32Operand valueOp(this, valueUse);
2706             GPRTemporary scratch(this);
2707             GPRReg scratchReg = scratch.gpr();
2708             m_jit.move(valueOp.gpr(), scratchReg);
2709             if (isClamped(type)) {
2710                 ASSERT(elementSize(type) == 1);
2711                 compileClampIntegerToByte(m_jit, scratchReg);
2712             }
2713             value.adopt(scratch);
2714             valueGPR = scratchReg;
2715             break;
2716         }
2717             
2718 #if USE(JSVALUE64)
2719         case Int52RepUse: {
2720             SpeculateStrictInt52Operand valueOp(this, valueUse);
2721             GPRTemporary scratch(this);
2722             GPRReg scratchReg = scratch.gpr();
2723             m_jit.move(valueOp.gpr(), scratchReg);
2724             if (isClamped(type)) {
2725                 ASSERT(elementSize(type) == 1);
2726                 MacroAssembler::Jump inBounds = m_jit.branch64(
2727                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2728                 MacroAssembler::Jump tooBig = m_jit.branch64(
2729                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2730                 m_jit.move(TrustedImm32(0), scratchReg);
2731                 MacroAssembler::Jump clamped = m_jit.jump();
2732                 tooBig.link(&m_jit);
2733                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2734                 clamped.link(&m_jit);
2735                 inBounds.link(&m_jit);
2736             }
2737             value.adopt(scratch);
2738             valueGPR = scratchReg;
2739             break;
2740         }
2741 #endif // USE(JSVALUE64)
2742             
2743         case DoubleRepUse: {
2744             if (isClamped(type)) {
2745                 ASSERT(elementSize(type) == 1);
2746                 SpeculateDoubleOperand valueOp(this, valueUse);
2747                 GPRTemporary result(this);
2748                 FPRTemporary floatScratch(this);
2749                 FPRReg fpr = valueOp.fpr();
2750                 GPRReg gpr = result.gpr();
2751                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2752                 value.adopt(result);
2753                 valueGPR = gpr;
2754             } else {
2755                 SpeculateDoubleOperand valueOp(this, valueUse);
2756                 GPRTemporary result(this);
2757                 FPRReg fpr = valueOp.fpr();
2758                 GPRReg gpr = result.gpr();
2759                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2760                 m_jit.xorPtr(gpr, gpr);
2761                 MacroAssembler::Jump fixed = m_jit.jump();
2762                 notNaN.link(&m_jit);
2763                 
2764                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2765                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2766                 
2767                 addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2768                 
2769                 fixed.link(&m_jit);
2770                 value.adopt(result);
2771                 valueGPR = gpr;
2772             }
2773             break;
2774         }
2775             
2776         default:
2777             RELEASE_ASSERT_NOT_REACHED();
2778             break;
2779         }
2780     }
2781     
2782     ASSERT_UNUSED(valueGPR, valueGPR != property);
2783     ASSERT(valueGPR != base);
2784     ASSERT(valueGPR != storageReg);
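     // If the array mode speculates in-bounds access, an out-of-bounds index becomes an OSR
     // exit; otherwise the out-of-bounds jump is kept and simply skips the store below.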
2785     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2786     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2787         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2788         outOfBounds = MacroAssembler::Jump();
2789     }
2790
2791     switch (elementSize(type)) {
2792     case 1:
2793         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2794         break;
2795     case 2:
2796         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2797         break;
2798     case 4:
2799         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2800         break;
2801     default:
2802         CRASH();
2803     }
2804     if (outOfBounds.isSet())
2805         outOfBounds.link(&m_jit);
2806     noResult(node);
2807 }
2808
2809 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2810 {
2811     ASSERT(isFloat(type));
2812     
2813     SpeculateCellOperand base(this, node->child1());
2814     SpeculateStrictInt32Operand property(this, node->child2());
2815     StorageOperand storage(this, node->child3());
2816
2817     GPRReg baseReg = base.gpr();
2818     GPRReg propertyReg = property.gpr();
2819     GPRReg storageReg = storage.gpr();
2820
2821     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2822
2823     FPRTemporary result(this);
2824     FPRReg resultReg = result.fpr();
2825     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2826     switch (elementSize(type)) {
2827     case 4:
2828         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2829         m_jit.convertFloatToDouble(resultReg, resultReg);
2830         break;
2831     case 8: {
2832         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2833         break;
2834     }
2835     default:
2836         RELEASE_ASSERT_NOT_REACHED();
2837     }
2838     
2839     doubleResult(resultReg, node);
2840 }
2841
2842 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2843 {
2844     ASSERT(isFloat(type));
2845     
2846     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2847     GPRReg storageReg = storage.gpr();
2848     
2849     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2850     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2851
2852     SpeculateDoubleOperand valueOp(this, valueUse);
2853     FPRTemporary scratch(this);
2854     FPRReg valueFPR = valueOp.fpr();
2855     FPRReg scratchFPR = scratch.fpr();
2856
2857     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2858     
2859     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2860     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2861         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2862         outOfBounds = MacroAssembler::Jump();
2863     }
2864     
2865     switch (elementSize(type)) {
2866     case 4: {
2867         m_jit.moveDouble(valueFPR, scratchFPR);
2868         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2869         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2870         break;
2871     }
2872     case 8:
2873         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2874         break;
2875     default:
2876         RELEASE_ASSERT_NOT_REACHED();
2877     }
2878     if (outOfBounds.isSet())
2879         outOfBounds.link(&m_jit);
2880     noResult(node);
2881 }
2882
2883 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2884 {
2885     // Check that prototype is an object.
2886     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2887     
2888     // Initialize scratchReg with the value being checked.
2889     m_jit.move(valueReg, scratchReg);
2890     
2891     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2892     MacroAssembler::Label loop(&m_jit);
2893     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2894         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
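     // A Proxy can intercept [[GetPrototypeOf]], so its prototype chain cannot be walked
     // inline; defer to the default hasInstance operation instead.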
2895     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2896     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2897     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2898 #if USE(JSVALUE64)
2899     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2900 #else
2901     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2902 #endif
2903     
2904     // No match - result is false.
2905 #if USE(JSVALUE64)
2906     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2907 #else
2908     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2909 #endif
2910     MacroAssembler::JumpList doneJumps; 
2911     doneJumps.append(m_jit.jump());
2912
2913     performDefaultHasInstance.link(&m_jit);
2914     silentSpillAllRegisters(scratchReg);
2915     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2916     silentFillAllRegisters(scratchReg);
2917     m_jit.exceptionCheck();
2918 #if USE(JSVALUE64)
2919     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2920 #endif
2921     doneJumps.append(m_jit.jump());
2922     
2923     isInstance.link(&m_jit);
2924 #if USE(JSVALUE64)
2925     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2926 #else
2927     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2928 #endif
2929     
2930     doneJumps.link(&m_jit);
2931 }
2932
2933 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2934 {
2935     SpeculateCellOperand base(this, node->child1());
2936
2937     GPRReg baseGPR = base.gpr();
2938
2939     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2940
2941     noResult(node);
2942 }
2943
2944 void SpeculativeJIT::compileInstanceOf(Node* node)
2945 {
2946     if (node->child1().useKind() == UntypedUse) {
2947         // It might not be a cell. Speculate less aggressively.
2948         // Or: it might only be used once (i.e. by us), so we get zero benefit
2949         // from speculating any more aggressively than we absolutely need to.
2950         
2951         JSValueOperand value(this, node->child1());
2952         SpeculateCellOperand prototype(this, node->child2());
2953         GPRTemporary scratch(this);
2954         GPRTemporary scratch2(this);
2955         
2956         GPRReg prototypeReg = prototype.gpr();
2957         GPRReg scratchReg = scratch.gpr();
2958         GPRReg scratch2Reg = scratch2.gpr();
2959         
2960         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2961         GPRReg valueReg = value.jsValueRegs().payloadGPR();
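         // Fall-through: a non-cell value can never be an instance of anything, so the result is false.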
2962         moveFalseTo(scratchReg);
2963
2964         MacroAssembler::Jump done = m_jit.jump();
2965         
2966         isCell.link(&m_jit);
2967         
2968         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2969         
2970         done.link(&m_jit);
2971
2972         blessedBooleanResult(scratchReg, node);
2973         return;
2974     }
2975     
2976     SpeculateCellOperand value(this, node->child1());
2977     SpeculateCellOperand prototype(this, node->child2());
2978     
2979     GPRTemporary scratch(this);
2980     GPRTemporary scratch2(this);
2981     
2982     GPRReg valueReg = value.gpr();
2983     GPRReg prototypeReg = prototype.gpr();
2984     GPRReg scratchReg = scratch.gpr();
2985     GPRReg scratch2Reg = scratch2.gpr();
2986     
2987     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2988
2989     blessedBooleanResult(scratchReg, node);
2990 }
2991
2992 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
2993 void SpeculativeJIT::emitUntypedBitOp(Node* node)
2994 {
2995     Edge& leftChild = node->child1();
2996     Edge& rightChild = node->child2();
2997
2998     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
2999         JSValueOperand left(this, leftChild);
3000         JSValueOperand right(this, rightChild);
3001         JSValueRegs leftRegs = left.jsValueRegs();
3002         JSValueRegs rightRegs = right.jsValueRegs();
3003 #if USE(JSVALUE64)
3004         GPRTemporary result(this);
3005         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3006 #else
3007         GPRTemporary resultTag(this);
3008         GPRTemporary resultPayload(this);
3009         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3010 #endif
3011         flushRegisters();
3012         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3013         m_jit.exceptionCheck();
3014
3015         jsValueResult(resultRegs, node);
3016         return;
3017     }
3018
3019     Optional<JSValueOperand> left;
3020     Optional<JSValueOperand> right;
3021
3022     JSValueRegs leftRegs;
3023     JSValueRegs rightRegs;
3024
3025 #if USE(JSVALUE64)
3026     GPRTemporary result(this);
3027     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3028     GPRTemporary scratch(this);
3029     GPRReg scratchGPR = scratch.gpr();
3030 #else
3031     GPRTemporary resultTag(this);
3032     GPRTemporary resultPayload(this);
3033     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3034     GPRReg scratchGPR = resultTag.gpr();
3035 #endif
3036
3037     SnippetOperand leftOperand;
3038     SnippetOperand rightOperand;
3039
3040     // The snippet generator does not support both operands being constant. If the left
3041     // operand is already const, we'll ignore the right operand's constness.
3042     if (leftChild->isInt32Constant())
3043         leftOperand.setConstInt32(leftChild->asInt32());
3044     else if (rightChild->isInt32Constant())
3045         rightOperand.setConstInt32(rightChild->asInt32());
3046
3047     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3048
3049     if (!leftOperand.isConst()) {
3050         left = JSValueOperand(this, leftChild);
3051         leftRegs = left->jsValueRegs();
3052     }
3053     if (!rightOperand.isConst()) {
3054         right = JSValueOperand(this, rightChild);
3055         rightRegs = right->jsValueRegs();
3056     }
3057
3058     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3059     gen.generateFastPath(m_jit);
3060
3061     ASSERT(gen.didEmitFastPath());
3062     gen.endJumpList().append(m_jit.jump());
3063
3064     gen.slowPathJumpList().link(&m_jit);
3065     silentSpillAllRegisters(resultRegs);
3066
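     // A constant operand was never loaded into registers for the fast path. Materialize it
     // into the result registers (free at this point) before making the slow path call.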
3067     if (leftOperand.isConst()) {
3068         leftRegs = resultRegs;
3069         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3070     } else if (rightOperand.isConst()) {
3071         rightRegs = resultRegs;
3072         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3073     }
3074
3075     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3076
3077     silentFillAllRegisters(resultRegs);
3078     m_jit.exceptionCheck();
3079
3080     gen.endJumpList().link(&m_jit);
3081     jsValueResult(resultRegs, node);
3082 }
3083
3084 void SpeculativeJIT::compileBitwiseOp(Node* node)
3085 {
3086     NodeType op = node->op();
3087     Edge& leftChild = node->child1();
3088     Edge& rightChild = node->child2();
3089
3090     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3091         switch (op) {
3092         case BitAnd:
3093             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3094             return;
3095         case BitOr:
3096             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3097             return;
3098         case BitXor:
3099             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3100             return;
3101         default:
3102             RELEASE_ASSERT_NOT_REACHED();
3103         }
3104     }
3105
3106     if (leftChild->isInt32Constant()) {
3107         SpeculateInt32Operand op2(this, rightChild);
3108         GPRTemporary result(this, Reuse, op2);
3109
3110         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3111
3112         int32Result(result.gpr(), node);
3113
3114     } else if (rightChild->isInt32Constant()) {
3115         SpeculateInt32Operand op1(this, leftChild);
3116         GPRTemporary result(this, Reuse, op1);
3117
3118         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3119
3120         int32Result(result.gpr(), node);
3121
3122     } else {
3123         SpeculateInt32Operand op1(this, leftChild);
3124         SpeculateInt32Operand op2(this, rightChild);
3125         GPRTemporary result(this, Reuse, op1, op2);
3126         
3127         GPRReg reg1 = op1.gpr();
3128         GPRReg reg2 = op2.gpr();
3129         bitOp(op, reg1, reg2, result.gpr());
3130         
3131         int32Result(result.gpr(), node);
3132     }
3133 }
3134
3135 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3136 {
3137     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3138         ? operationValueBitRShift : operationValueBitURShift;
3139     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3140         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3141
3142     Edge& leftChild = node->child1();
3143     Edge& rightChild = node->child2();
3144
3145     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3146         JSValueOperand left(this, leftChild);
3147         JSValueOperand right(this, rightChild);
3148         JSValueRegs leftRegs = left.jsValueRegs();
3149         JSValueRegs rightRegs = right.jsValueRegs();
3150 #if USE(JSVALUE64)
3151         GPRTemporary result(this);
3152         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3153 #else
3154         GPRTemporary resultTag(this);
3155         GPRTemporary resultPayload(this);
3156         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3157 #endif
3158         flushRegisters();
3159         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3160         m_jit.exceptionCheck();
3161
3162         jsValueResult(resultRegs, node);
3163         return;
3164     }
3165
3166     Optional<JSValueOperand> left;
3167     Optional<JSValueOperand> right;
3168
3169     JSValueRegs leftRegs;
3170     JSValueRegs rightRegs;
3171
3172     FPRTemporary leftNumber(this);
3173     FPRReg leftFPR = leftNumber.fpr();
3174
3175 #if USE(JSVALUE64)
3176     GPRTemporary result(this);
3177     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3178     GPRTemporary scratch(this);
3179     GPRReg scratchGPR = scratch.gpr();
3180     FPRReg scratchFPR = InvalidFPRReg;
3181 #else
3182     GPRTemporary resultTag(this);
3183     GPRTemporary resultPayload(this);
3184     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3185     GPRReg scratchGPR = resultTag.gpr();
3186     FPRTemporary fprScratch(this);
3187     FPRReg scratchFPR = fprScratch.fpr();
3188 #endif
3189
3190     SnippetOperand leftOperand;
3191     SnippetOperand rightOperand;
3192
3193     // The snippet generator does not support both operands being constant. If the left
3194     // operand is already const, we'll ignore the right operand's constness.
3195     if (leftChild->isInt32Constant())
3196         leftOperand.setConstInt32(leftChild->asInt32());
3197     else if (rightChild->isInt32Constant())
3198         rightOperand.setConstInt32(rightChild->asInt32());
3199
3200     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3201
3202     if (!leftOperand.isConst()) {
3203         left = JSValueOperand(this, leftChild);
3204         leftRegs = left->jsValueRegs();
3205     }
3206     if (!rightOperand.isConst()) {
3207         right = JSValueOperand(this, rightChild);
3208         rightRegs = right->jsValueRegs();
3209     }
3210
3211     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3212         leftFPR, scratchGPR, scratchFPR, shiftType);
3213     gen.generateFastPath(m_jit);
3214
3215     ASSERT(gen.didEmitFastPath());
3216     gen.endJumpList().append(m_jit.jump());
3217
3218     gen.slowPathJumpList().link(&m_jit);
3219     silentSpillAllRegisters(resultRegs);
3220
3221     if (leftOperand.isConst()) {
3222         leftRegs = resultRegs;
3223         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3224     } else if (rightOperand.isConst()) {
3225         rightRegs = resultRegs;
3226         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3227     }
3228
3229     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3230
3231     silentFillAllRegisters(resultRegs);
3232     m_jit.exceptionCheck();
3233
3234     gen.endJumpList().link(&m_jit);
3235     jsValueResult(resultRegs, node);
3236     return;
3237 }
3238
3239 void SpeculativeJIT::compileShiftOp(Node* node)
3240 {
3241     NodeType op = node->op();
3242     Edge& leftChild = node->child1();
3243     Edge& rightChild = node->child2();
3244
3245     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3246         switch (op) {
3247         case BitLShift:
3248             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3249             return;
3250         case BitRShift:
3251         case BitURShift:
3252             emitUntypedRightShiftBitOp(node);
3253             return;
3254         default:
3255             RELEASE_ASSERT_NOT_REACHED();
3256         }
3257     }
3258
3259     if (rightChild->isInt32Constant()) {
3260         SpeculateInt32Operand op1(this, leftChild);
3261         GPRTemporary result(this, Reuse, op1);
3262
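         // Per ECMA-262, only the low five bits of the shift count are significant.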
3263         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3264
3265         int32Result(result.gpr(), node);
3266     } else {
3267         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3268         SpeculateInt32Operand op1(this, leftChild);
3269         SpeculateInt32Operand op2(this, rightChild);
3270         GPRTemporary result(this, Reuse, op1);
3271
3272         GPRReg reg1 = op1.gpr();
3273         GPRReg reg2 = op2.gpr();
3274         shiftOp(op, reg1, reg2, result.gpr());
3275
3276         int32Result(result.gpr(), node);
3277     }
3278 }
3279
3280 void SpeculativeJIT::compileValueAdd(Node* node)
3281 {
3282     Edge& leftChild = node->child1();
3283     Edge& rightChild = node->child2();
3284
3285     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3286         JSValueOperand left(this, leftChild);
3287         JSValueOperand right(this, rightChild);
3288         JSValueRegs leftRegs = left.jsValueRegs();
3289         JSValueRegs rightRegs = right.jsValueRegs();
3290 #if USE(JSVALUE64)
3291         GPRTemporary result(this);
3292         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3293 #else
3294         GPRTemporary resultTag(this);
3295         GPRTemporary resultPayload(this);
3296         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3297 #endif
3298         flushRegisters();
3299         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3300         m_jit.exceptionCheck();
3301     
3302         jsValueResult(resultRegs, node);
3303         return;
3304     }
3305
3306     Optional<JSValueOperand> left;
3307     Optional<JSValueOperand> right;
3308
3309     JSValueRegs leftRegs;
3310     JSValueRegs rightRegs;
3311
3312     FPRTemporary leftNumber(this);
3313     FPRTemporary rightNumber(this);
3314     FPRReg leftFPR = leftNumber.fpr();
3315     FPRReg rightFPR = rightNumber.fpr();
3316
3317 #if USE(JSVALUE64)
3318     GPRTemporary result(this);
3319     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3320     GPRTemporary scratch(this);
3321     GPRReg scratchGPR = scratch.gpr();
3322     FPRReg scratchFPR = InvalidFPRReg;
3323 #else
3324     GPRTemporary resultTag(this);
3325     GPRTemporary resultPayload(this);
3326     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3327     GPRReg scratchGPR = resultTag.gpr();
3328     FPRTemporary fprScratch(this);
3329     FPRReg scratchFPR = fprScratch.fpr();
3330 #endif
3331
3332     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3333     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3334
3335     // The snippet generator does not support both operands being constant. If the left
3336     // operand is already const, we'll ignore the right operand's constness.
3337     if (leftChild->isInt32Constant())
3338         leftOperand.setConstInt32(leftChild->asInt32());
3339     else if (rightChild->isInt32Constant())
3340         rightOperand.setConstInt32(rightChild->asInt32());
3341
3342     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3343
3344     if (!leftOperand.isConst()) {
3345         left = JSValueOperand(this, leftChild);
3346         leftRegs = left->jsValueRegs();
3347     }
3348     if (!rightOperand.isConst()) {
3349         right = JSValueOperand(this, rightChild);
3350         rightRegs = right->jsValueRegs();
3351     }
3352
3353     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3354         leftFPR, rightFPR, scratchGPR, scratchFPR);
3355     gen.generateFastPath(m_jit);
3356
3357     ASSERT(gen.didEmitFastPath());
3358     gen.endJumpList().append(m_jit.jump());
3359
3360     gen.slowPathJumpList().link(&m_jit);
3361
3362     silentSpillAllRegisters(resultRegs);
3363
3364     if (leftOperand.isConst()) {
3365         leftRegs = resultRegs;
3366         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3367     } else if (rightOperand.isConst()) {
3368         rightRegs = resultRegs;
3369         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3370     }
3371
3372     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3373
3374     silentFillAllRegisters(resultRegs);
3375     m_jit.exceptionCheck();
3376
3377     gen.endJumpList().link(&m_jit);
3378     jsValueResult(resultRegs, node);
3379     return;
3380 }
3381
3382 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3383 {
3384     // We could do something smarter here, but this case is currently super rare and, unless
3385     // Symbol.hasInstance becomes popular, it will likely remain that way.
3386
3387     JSValueOperand value(this, node->child1());
3388     SpeculateCellOperand constructor(this, node->child2());
3389     JSValueOperand hasInstanceValue(this, node->child3());
3390     GPRTemporary result(this);
3391
3392     JSValueRegs valueRegs = value.jsValueRegs();
3393     GPRReg constructorGPR = constructor.gpr();
3394     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3395     GPRReg resultGPR = result.gpr();
3396
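     // There is no fast path here: unconditionally jump to the slow path call below.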
3397     MacroAssembler::Jump slowCase = m_jit.jump();
3398
3399     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3400
3401     unblessedBooleanResult(resultGPR, node);
3402 }
3403
3404 void SpeculativeJIT::compileIsJSArray(Node* node)
3405 {
3406     JSValueOperand value(this, node->child1());
3407     GPRFlushedCallResult result(this);
3408
3409     JSValueRegs valueRegs = value.jsValueRegs();
3410     GPRReg resultGPR = result.gpr();
3411
3412     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3413
3414     m_jit.compare8(JITCompiler::Equal,
3415         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3416         TrustedImm32(ArrayType),
3417         resultGPR);
3418     blessBoolean(resultGPR);
3419     JITCompiler::Jump done = m_jit.jump();
3420
3421     isNotCell.link(&m_jit);
3422     moveFalseTo(resultGPR);
3423
3424     done.link(&m_jit);
3425     blessedBooleanResult(resultGPR, node);
3426 }
3427
3428 void SpeculativeJIT::compileIsArrayObject(Node* node)
3429 {
3430     JSValueOperand value(this, node->child1());
3431     GPRFlushedCallResult result(this);
3432
3433     JSValueRegs valueRegs = value.jsValueRegs();
3434     GPRReg resultGPR = result.gpr();
3435
3436     JITCompiler::JumpList done;
3437
3438     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3439
3440     JITCompiler::Jump notJSArray = m_jit.branch8(JITCompiler::NotEqual,
3441         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3442         TrustedImm32(ArrayType));
3443     m_jit.move(TrustedImm32(true), resultGPR);
3444     done.append(m_jit.jump());
3445
3446     notJSArray.link(&m_jit);
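     // Cells that are not plain JSArray instances need the generic runtime check.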
3447     silentSpillAllRegisters(resultGPR);
3448     callOperation(operationIsArrayObject, resultGPR, valueRegs);
3449     silentFillAllRegisters(resultGPR);
3450     m_jit.exceptionCheck();
3451     done.append(m_jit.jump());
3452
3453     isNotCell.link(&m_jit);
3454     m_jit.move(TrustedImm32(false), resultGPR);
3455
3456     done.link(&m_jit);
3457     unblessedBooleanResult(resultGPR, node);
3458 }
3459
3460 // FIXME: This function should just get the ClassInfo and check if it's == ArrayConstructor::info(). https://bugs.webkit.org/show_bug.cgi?id=155667
3461 void SpeculativeJIT::compileIsArrayConstructor(Node* node)
3462 {
3463     JSValueOperand value(this, node->child1());
3464     GPRFlushedCallResult result(this);
3465
3466     JSValueRegs valueRegs = value.jsValueRegs();
3467     GPRReg resultGPR = result.gpr();
3468
3469     flushRegisters();
3470     callOperation(operationIsArrayConstructor, resultGPR, valueRegs);
3471     unblessedBooleanResult(resultGPR, node);
3472 }
3473
3474 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3475 {
3476     JSValueOperand value(this, node->child1());
3477     GPRFlushedCallResult result(this);
3478
3479     JSValueRegs valueRegs = value.jsValueRegs();
3480     GPRReg resultGPR = result.gpr();
3481
3482     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3483
3484     m_jit.compare8(JITCompiler::Equal,
3485         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3486         TrustedImm32(RegExpObjectType),
3487         resultGPR);
3488     blessBoolean(resultGPR);
3489     JITCompiler::Jump done = m_jit.jump();
3490
3491     isNotCell.link(&m_jit);
3492     moveFalseTo(resultGPR);
3493
3494     done.link(&m_jit);
3495     blessedBooleanResult(resultGPR, node);
3496 }
3497
3498 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3499 {
3500     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3501     JSValueOperand value(this, node->child1());
3502 #if USE(JSVALUE64)
3503     GPRTemporary result(this, Reuse, value);
3504 #else
3505     GPRTemporary result(this, Reuse, value, PayloadWord);
3506 #endif
3507
3508     JSValueRegs valueRegs = value.jsValueRegs();
3509     GPRReg resultGPR = result.gpr();
3510
3511     MacroAssembler::JumpList slowCases;
3512     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3513     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3514     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3515
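     // Only objects take the fast path (the value passes through unchanged); everything else
     // falls back to operationObjectConstructor.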
3516     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3517     cellResult(resultGPR, node);
3518 }
3519
3520 void SpeculativeJIT::compileArithAdd(Node* node)
3521 {
3522     switch (node->binaryUseKind()) {
3523     case Int32Use: {
3524         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3525
3526         if (node->child2()->isInt32Constant()) {
3527             SpeculateInt32Operand op1(this, node->child1());
3528             GPRTemporary result(this, Reuse, op1);
3529
3530             GPRReg gpr1 = op1.gpr();
3531             int32_t imm2 = node->child2()->asInt32();
3532             GPRReg gprResult = result.gpr();
3533
3534             if (!shouldCheckOverflow(node->arithMode())) {
3535                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3536                 int32Result(gprResult, node);
3537                 return;
3538             }
3539
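             // With overflow checking on, a failed add must OSR exit. If the add clobbered the
             // operand register, record a recovery so the exit can reconstruct the original value.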
3540             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3541             if (gpr1 == gprResult) {
3542                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3543                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3544             } else
3545                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3546
3547             int32Result(gprResult, node);
3548             return;
3549         }
3550                 
3551         SpeculateInt32Operand op1(this, node->child1());
3552         SpeculateInt32Operand op2(this, node->child2());
3553         GPRTemporary result(this, Reuse, op1, op2);
3554
3555         GPRReg gpr1 = op1.gpr();
3556         GPRReg gpr2 = op2.gpr();
3557         GPRReg gprResult = result.gpr();
3558
3559         if (!shouldCheckOverflow(node->arithMode()))
3560             m_jit.add32(gpr1, gpr2, gprResult);
3561         else {
3562             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3563                 
3564             if (gpr1 == gprResult)
3565                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3566             else if (gpr2 == gprResult)
3567                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3568             else
3569                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3570         }
3571
3572         int32Result(gprResult, node);
3573         return;
3574     }
3575         
3576 #if USE(JSVALUE64)
3577     case Int52RepUse: {
3578         ASSERT(shouldCheckOverflow(node->arithMode()));
3579         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3580
3581         // Will we need an overflow check? If we can prove that neither input can be
3582         // Int52 then the overflow check will not be necessary.
3583         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3584             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3585             SpeculateWhicheverInt52Operand op1(this, node->child1());
3586             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3587             GPRTemporary result(this, Reuse, op1);
3588             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3589             int52Result(result.gpr(), node, op1.format());
3590             return;
3591         }
3592         
3593         SpeculateInt52Operand op1(this, node->child1());
3594         SpeculateInt52Operand op2(this, node->child2());
3595         GPRTemporary result(this);
3596         m_jit.move(op1.gpr(), result.gpr());
3597         speculationCheck(
3598             Int52Overflow, JSValueRegs(), 0,
3599             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3600         int52Result(result.gpr(), node);
3601         return;
3602     }
3603 #endif // USE(JSVALUE64)
3604     
3605     case DoubleRepUse: {
3606         SpeculateDoubleOperand op1(this, node->child1());
3607         SpeculateDoubleOperand op2(this, node->child2());
3608         FPRTemporary result(this, op1, op2);
3609
3610         FPRReg reg1 = op1.fpr();
3611         FPRReg reg2 = op2.fpr();
3612         m_jit.addDouble(reg1, reg2, result.fpr());
3613
3614         doubleResult(result.fpr(), node);
3615         return;
3616     }
3617         
3618     default:
3619         RELEASE_ASSERT_NOT_REACHED();
3620         break;
3621     }
3622 }
3623
3624 void SpeculativeJIT::compileMakeRope(Node* node)
3625 {
3626     ASSERT(node->child1().useKind() == KnownStringUse);
3627     ASSERT(node->child2().useKind() == KnownStringUse);
3628     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3629     
3630     SpeculateCellOperand op1(this, node->child1());
3631     SpeculateCellOperand op2(this, node->child2());
3632     SpeculateCellOperand op3(this, node->child3());
3633     GPRTemporary result(this);
3634     GPRTemporary allocator(this);
3635     GPRTemporary scratch(this);
3636     
3637     GPRReg opGPRs[3];
3638     unsigned numOpGPRs;
3639     opGPRs[0] = op1.gpr();
3640     opGPRs[1] = op2.gpr();
3641     if (node->child3()) {
3642         opGPRs[2] = op3.gpr();
3643         numOpGPRs = 3;
3644     } else {
3645         opGPRs[2] = InvalidGPRReg;
3646         numOpGPRs = 2;
3647     }
3648     GPRReg resultGPR = result.gpr();
3649     GPRReg allocatorGPR = allocator.gpr();
3650     GPRReg scratchGPR = scratch.gpr();
3651     
3652     JITCompiler::JumpList slowPath;
3653     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3654     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3655     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3656         
3657     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3658     for (unsigned i = 0; i < numOpGPRs; ++i)
3659         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3660     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3661         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3662     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3663     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3664     if (!ASSERT_DISABLED) {
3665         JITCompiler::Jump ok = m_jit.branch32(
3666             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3667         m_jit.abortWithReason(DFGNegativeStringLength);
3668         ok.link(&m_jit);
3669     }
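     // The rope is 8-bit only if every fiber is 8-bit, so AND the fibers' flags together. The
     // rope's length is the sum of the fiber lengths; overflow triggers an OSR exit.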
3670     for (unsigned i = 1; i < numOpGPRs; ++i) {
3671         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3672         speculationCheck(
3673             Uncountable, JSValueSource(), nullptr,
3674             m_jit.branchAdd32(
3675                 JITCompiler::Overflow,
3676                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3677     }
3678     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3679     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3680     if (!ASSERT_DISABLED) {
3681         JITCompiler::Jump ok = m_jit.branch32(
3682             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3683         m_jit.abortWithReason(DFGNegativeStringLength);
3684         ok.link(&m_jit);
3685     }
3686     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3687     
3688     switch (numOpGPRs) {
3689     case 2:
3690         addSlowPathGenerator(slowPathCall(
3691             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3692         break;
3693     case 3:
3694         addSlowPathGenerator(slowPathCall(
3695             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3696         break;
3697     default:
3698         RELEASE_ASSERT_NOT_REACHED();
3699         break;
3700     }
3701         
3702     cellResult(resultGPR, node);
3703 }
3704
3705 void SpeculativeJIT::compileArithClz32(Node* node)
3706 {
3707     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3708     SpeculateInt32Operand value(this, node->child1());
3709     GPRTemporary result(this, Reuse, value);
3710     GPRReg valueReg = value.gpr();
3711     GPRReg resultReg = result.gpr();
3712     m_jit.countLeadingZeros32(valueReg, resultReg);
3713     int32Result(resultReg, node);
3714 }
3715
3716 void SpeculativeJIT::compileArithSub(Node* node)
3717 {
3718     switch (node->binaryUseKind()) {
3719     case Int32Use: {
3720         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3721         
3722         if (node->child2()->isInt32Constant()) {
3723             SpeculateInt32Operand op1(this, node->child1());
3724             int32_t imm2 = node->child2()->asInt32();
3725             GPRTemporary result(this);
3726
3727             if (!shouldCheckOverflow(node->arithMode())) {
3728                 m_jit.move(op1.gpr(), result.gpr());
3729                 m_jit.sub32(Imm32(imm2), result.gpr());
3730             } else {
3731                 GPRTemporary scratch(this);
3732                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3733             }
3734
3735             int32Result(result.gpr(), node);
3736             return;
3737         }
3738             
3739         if (node->child1()->isInt32Constant()) {
3740             int32_t imm1 = node->child1()->asInt32();
3741             SpeculateInt32Operand op2(this, node->child2());
3742             GPRTemporary result(this);
3743                 
3744             m_jit.move(Imm32(imm1), result.gpr());
3745             if (!shouldCheckOverflow(node->arithMode()))
3746                 m_jit.sub32(op2.gpr(), result.gpr());
3747             else
3748                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3749                 
3750             int32Result(result.gpr(), node);
3751             return;
3752         }
3753             
3754         SpeculateInt32Operand op1(this, node->child1());
3755         SpeculateInt32Operand op2(this, node->child2());
3756         GPRTemporary result(this);
3757
3758         if (!shouldCheckOverflow(node->arithMode())) {
3759             m_jit.move(op1.gpr(), result.gpr());
3760             m_jit.sub32(op2.gpr(), result.gpr());
3761         } else
3762             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3763
3764         int32Result(result.gpr(), node);
3765         return;
3766     }
3767         
3768 #if USE(JSVALUE64)
3769     case Int52RepUse: {
3770         ASSERT(shouldCheckOverflow(node->arithMode()));
3771         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3772
3773         // Will we need an overflow check? If we can prove that neither input can be
3774         // Int52 then the overflow check will not be necessary.
3775         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3776             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3777             SpeculateWhicheverInt52Operand op1(this, node->child1());
3778             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3779             GPRTemporary result(this, Reuse, op1);
3780             m_jit.move(op1.gpr(), result.gpr());
3781             m_jit.sub64(op2.gpr(), result.gpr());
3782             int52Result(result.gpr(), node, op1.format());
3783             return;
3784         }
3785         
3786         SpeculateInt52Operand op1(this, node->child1());
3787         SpeculateInt52Operand op2(this, node->child2());
3788         GPRTemporary result(this);
3789         m_jit.move(op1.gpr(), result.gpr());
3790         speculationCheck(
3791             Int52Overflow, JSValueRegs(), 0,
3792             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3793         int52Result(result.gpr(), node);
3794         return;
3795     }
3796 #endif // USE(JSVALUE64)
3797
3798     case DoubleRepUse: {
3799         SpeculateDoubleOperand op1(this, node->child1());
3800         SpeculateDoubleOperand op2(this, node->child2());
3801         FPRTemporary result(this, op1);
3802
3803         FPRReg reg1 = op1.fpr();
3804         FPRReg reg2 = op2.fpr();
3805         m_jit.subDouble(reg1, reg2, result.fpr());
3806
3807         doubleResult(result.fpr(), node);
3808         return;
3809     }
3810
3811     case UntypedUse: {
3812         Edge& leftChild = node->child1();
3813         Edge& rightChild = node->child2();
3814
3815         JSValueOperand left(this, leftChild);
3816         JSValueOperand right(this, rightChild);
3817
3818         JSValueRegs leftRegs = left.jsValueRegs();
3819         JSValueRegs rightRegs = right.jsValueRegs();
3820
3821         FPRTemporary leftNumber(this);
3822         FPRTemporary rightNumber(this);
3823         FPRReg leftFPR = leftNumber.fpr();
3824         FPRReg rightFPR = rightNumber.fpr();
3825
3826 #if USE(JSVALUE64)
3827         GPRTemporary result(this);
3828         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3829         GPRTemporary scratch(this);
3830         GPRReg scratchGPR = scratch.gpr();
3831         FPRReg scratchFPR = InvalidFPRReg;
3832 #else
3833         GPRTemporary resultTag(this);
3834         GPRTemporary resultPayload(this);
3835         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3836         GPRReg scratchGPR = resultTag.gpr();
3837         FPRTemporary fprScratch(this);
3838         FPRReg scratchFPR = fprScratch.fpr();
3839 #endif
3840
3841         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3842         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3843
3844         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3845             leftFPR, rightFPR, scratchGPR, scratchFPR);
3846         gen.generateFastPath(m_jit);
3847
3848         ASSERT(gen.didEmitFastPath());
3849         gen.endJumpList().append(m_jit.jump());
3850
3851         gen.slowPathJumpList().link(&m_jit);
3852         silentSpillAllRegisters(resultRegs);
3853         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3854         silentFillAllRegisters(resultRegs);
3855         m_jit.exceptionCheck();
3856
3857         gen.endJumpList().link(&m_jit);
3858         jsValueResult(resultRegs, node);
3859         return;
3860     }
3861
3862     default:
3863         RELEASE_ASSERT_NOT_REACHED();
3864         return;
3865     }
3866 }
3867
3868 void SpeculativeJIT::compileArithNegate(Node* node)
3869 {
3870     switch (node->child1().useKind()) {
3871     case Int32Use: {
3872         SpeculateInt32Operand op1(this, node->child1());
3873         GPRTemporary result(this);
3874
3875         m_jit.move(op1.gpr(), result.gpr());
3876
3877         // Note: there is no notion of a value being unused as a number while someone
3878         // still cares about negative zero.
3879         
3880         if (!shouldCheckOverflow(node->arithMode()))
3881             m_jit.neg32(result.gpr());
3882         else if (!shouldCheckNegativeZero(node->arithMode()))
3883             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3884         else {
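             // (value & 0x7fffffff) == 0 means the value is either 0 (whose negation is -0) or
             // INT_MIN (whose negation overflows); both cases must exit.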
3885             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3886             m_jit.neg32(result.gpr());
3887         }
3888
3889         int32Result(result.gpr(), node);
3890         return;
3891     }
3892
3893 #if USE(JSVALUE64)
3894     case Int52RepUse: {
3895         ASSERT(shouldCheckOverflow(node->arithMode()));
3896         
3897         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)) {
3898             SpeculateWhicheverInt52Operand op1(this, node->child1());
3899             GPRTemporary result(this);
3900             GPRReg op1GPR = op1.gpr();
3901             GPRReg resultGPR = result.gpr();
3902             m_jit.move(op1GPR, resultGPR);
3903             m_jit.neg64(resultGPR);
3904             if (shouldCheckNegativeZero(node->arithMode())) {
3905                 speculationCheck(
3906                     NegativeZero, JSValueRegs(), 0,